OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
 
main_vic_petsc_double.cpp
#include "config.h"

#ifdef HAVE_PETSC

//#define SE_CLASS1
//#define PRINT_STACKTRACE

#include "interpolation/interpolation.hpp"
#include "Grid/grid_dist_id.hpp"
#include "Vector/vector_dist.hpp"
#include "Matrix/SparseMatrix.hpp"
#include "Vector/Vector.hpp"
#include "FiniteDifference/FDScheme.hpp"
#include "Solvers/petsc_solver.hpp"
#include "interpolation/mp4_kernel.hpp"
#include "Solvers/petsc_solver_AMG_report.hpp"

constexpr int x = 0;
constexpr int y = 1;
constexpr int z = 2;

// The type of the grids
typedef grid_dist_id<3,double,aggregate<double[3]>> grid_type;

// The type of the particles
typedef vector_dist<3,double,aggregate<double[3],double[3],double[3],double[3],double[3]>> particles_type;
// radius of the torus
double ringr1 = 1.0;
// radius of the core of the torus
double sigma = 1.0/3.523;
// Reynolds number
double tgtre = 7500.0;
// Noise factor for the ring vorticity on z
double ringnz = 0.01;

// Kinematic viscosity
double nu = 1.0/tgtre;

// Time step
double dt = 0.025;

// All the properties by index
// (vorticity and velocity share index 0 because they live on different grids)
constexpr unsigned int vorticity = 0;
constexpr unsigned int velocity = 0;
constexpr unsigned int p_vel = 1;
constexpr int rhs_part = 2;
constexpr unsigned int old_vort = 3;
constexpr unsigned int old_pos = 4;

template<typename grid> void calc_and_print_max_div_and_int(grid & g_vort)
{
    g_vort.template ghost_get<vorticity>();
    auto it5 = g_vort.getDomainIterator();

    double max_vort = 0.0;

    double int_vort[3] = {0.0,0.0,0.0};

    while (it5.isNext())
    {
        auto key = it5.get();

        double tmp;

        tmp = 1.0f/g_vort.spacing(x)/2.0f*(g_vort.template get<vorticity>(key.move(x,1))[x] - g_vort.template get<vorticity>(key.move(x,-1))[x] ) +
              1.0f/g_vort.spacing(y)/2.0f*(g_vort.template get<vorticity>(key.move(y,1))[y] - g_vort.template get<vorticity>(key.move(y,-1))[y] ) +
              1.0f/g_vort.spacing(z)/2.0f*(g_vort.template get<vorticity>(key.move(z,1))[z] - g_vort.template get<vorticity>(key.move(z,-1))[z] );

        int_vort[x] += g_vort.template get<vorticity>(key)[x];
        int_vort[y] += g_vort.template get<vorticity>(key)[y];
        int_vort[z] += g_vort.template get<vorticity>(key)[z];

        if (tmp > max_vort)
            max_vort = tmp;

        ++it5;
    }

    Vcluster & v_cl = create_vcluster();
    v_cl.max(max_vort);
    v_cl.sum(int_vort[0]);
    v_cl.sum(int_vort[1]);
    v_cl.sum(int_vort[2]);
    v_cl.execute();

    if (v_cl.getProcessUnitID() == 0)
    {std::cout << "Max div for vorticity " << max_vort << " Integral: " << int_vort[0] << " " << int_vort[1] << " " << int_vort[2] << std::endl;}
}
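
For reference, the loop above applies the standard second-order central-difference divergence, and the reductions combine the per-processor results. At each grid point p the stencil reads

\[
(\nabla\cdot\boldsymbol\omega)_{\mathbf p} \;\approx\; \sum_{d\in\{x,y,z\}} \frac{\omega_d(\mathbf p+\mathbf e_d)-\omega_d(\mathbf p-\mathbf e_d)}{2\,h_d}
\]

where \(h_d\) is the grid spacing in direction \(d\). For a divergence-free vorticity field both the reported maximum and the integral should remain close to zero.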

/*
 * gr is the grid where we are initializing the vortex ring
 * domain is the simulation domain
 */
void init_ring(grid_type & gr, const Box<3,double> & domain)
{
    // To add some noise to the vortex ring we create two random
    // vectors
    constexpr int nk = 32;

    double ak[nk];
    double bk[nk];

    for (size_t i = 0 ; i < nk ; i++)
    {
        // cast to double to avoid integer division (rand()/RAND_MAX would truncate to 0)
        ak[i] = rand()/(double)RAND_MAX;
        bk[i] = rand()/(double)RAND_MAX;
    }
236
237 // We calculate the circuitation gamma
238 double gamma = nu * tgtre;
239 double rinv2 = 1.0f/(sigma*sigma);
240 double max_vorticity = gamma*rinv2/M_PI;
241
242 // We go through the grid to initialize the vortex
243 auto it = gr.getDomainIterator();
244
245 while (it.isNext())
246 {
247 auto key_d = it.get();
248 auto key = it.getGKey(key_d);
249
250 double tx = (key.get(x)-2)*gr.spacing(x) + domain.getLow(x);
251 double ty = (key.get(y)-2)*gr.spacing(y) + domain.getLow(y);
252 double tz = (key.get(z)-2)*gr.spacing(z) + domain.getLow(z);
253 double theta1 = atan2((ty-2.5f),(tz-2.5f));
254
255
256
257 double noise = 0.0f;
258 // for (int kk=1 ; kk < nk; kk++)
259 // noise = noise + sin(kk*(theta1+2.0f*M_PI*ak[kk])) + cos(kk*(theta1+2.0f*M_PI*bk[kk]));
260
261 double rad1r = sqrt((ty-2.5f)*(ty-2.5f) + (tz-2.5f)*(tz-2.5f)) - ringr1*(1.0f + ringnz * noise);
262 double rad1t = tx - 1.0f;
263 double rad1sq = rad1r*rad1r + rad1t*rad1t;
264 double radstr = -exp(-rad1sq*rinv2)*rinv2*gamma/M_PI;
265 gr.template get<vorticity>(key_d)[x] = 0.0f;
266 gr.template get<vorticity>(key_d)[y] = -radstr * cos(theta1);
267 gr.template get<vorticity>(key_d)[z] = radstr * sin(theta1);
268
269
270 ++it;
271 }
272}
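
Up to the orientation factors \(\cos\theta_1\) and \(\sin\theta_1\), which make the vorticity purely azimuthal around the ring centerline, the initialization above is a Gaussian vortex core

\[
\omega_\theta(r) \;=\; \frac{\Gamma}{\pi\sigma^2}\, e^{-r^2/\sigma^2}
\]

with circulation \(\Gamma = \nu\,\mathrm{Re}\) (gamma in the code), core radius \(\sigma\) (sigma) and \(r\) the distance from the centerline (rad1sq is \(r^2\)); the minus sign in radstr fixes the sense of rotation.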


// Specification of the Poisson equation for the Helmholtz-Hodge projection.
// 3D (dims = 3). The field is a scalar value (nvar = 1), boundaries are periodic,
// the type of the space is double. b_grid is the grid that will store \phi, the result (grid_dist_id<...>).
// The others indicate which kind of matrix to use to construct the linear system and
// which kind of vector to construct for the right-hand side. Here we use a PETSc sparse matrix
// and a PETSc vector. NORMAL_GRID indicates that it is a standard (non-staggered) grid.
struct poisson_nn_helm
{
    static const unsigned int dims = 3;
    static const unsigned int nvar = 1;
    static const bool boundary[];
    typedef double stype;
    typedef grid_dist_id<3,double,aggregate<double>> b_grid;
    typedef SparseMatrix<double,int,PETSC_BASE> SparseMatrix_type;
    typedef Vector<double,PETSC_BASE> Vector_type;
    static const int grid_type = NORMAL_GRID;
};

const bool poisson_nn_helm::boundary[] = {PERIODIC,PERIODIC,PERIODIC};

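This system type is consumed by FDScheme below to discretize the Helmholtz-Hodge projection. The vorticity interpolated from the particles is not exactly divergence-free, so a scalar potential \(\psi\) is computed and its gradient subtracted:

\[
\nabla^2\psi \;=\; \nabla\cdot\boldsymbol\omega, \qquad \boldsymbol\omega \;\leftarrow\; \boldsymbol\omega - \nabla\psi
\]

This is exactly what helmotz_hodge_projection() implements with the central-difference divergence and gradient shown below.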

/*
 * gr vorticity grid where we apply the correction
 * domain simulation domain
 */
void helmotz_hodge_projection(grid_type & gr, const Box<3,double> & domain, petsc_solver<double> & solver, petsc_solver<double>::return_type & x_ , bool init)
{
    // Convenient constant
    constexpr unsigned int phi = 0;

    // We define a field phi_f
    typedef Field<phi,poisson_nn_helm> phi_f;

    // We assemble the Poisson equation taking the Laplacian of phi with the
    // central scheme (where both derivatives use the central scheme, not
    // forward composed with backward as usual)
    typedef Lap<phi_f,poisson_nn_helm,CENTRAL_SYM> poisson;

    // ghost get
    gr.template ghost_get<vorticity>();

    // ghost size of the psi function
    Ghost<3,long int> g(2);

    // Here we create a distributed grid to store the result of the Helmholtz projection
    grid_dist_id<3,double,aggregate<double>> psi(gr.getDecomposition(),gr.getGridInfo().getSize(),g);

    // Calculate the divergence of the vortex field
    auto it = gr.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        psi.template get<phi>(key) = 1.0f/gr.spacing(x)/2.0f*(gr.template get<vorticity>(key.move(x,1))[x] - gr.template get<vorticity>(key.move(x,-1))[x] ) +
                                     1.0f/gr.spacing(y)/2.0f*(gr.template get<vorticity>(key.move(y,1))[y] - gr.template get<vorticity>(key.move(y,-1))[y] ) +
                                     1.0f/gr.spacing(z)/2.0f*(gr.template get<vorticity>(key.move(z,1))[z] - gr.template get<vorticity>(key.move(z,-1))[z] );

        ++it;
    }

    calc_and_print_max_div_and_int(gr);

    // In order to create a matrix that represents the Poisson equation we have to indicate
    // the maximum extension of the stencil, and we need an extra layer
    // of points in case we have to impose particular boundary conditions.
    Ghost<3,long int> stencil_max(2);

    // Here we define our finite-difference discretization scheme object
    FDScheme<poisson_nn_helm> fd(stencil_max, domain, psi);

    // Impose the Poisson equation, using the right-hand side psi on the full domain (psi.getDomainIterator()).
    // The template parameter defines which property of psi carries the right-hand side;
    // in this case psi has only one property, so property 0 carries the right-hand side.
    fd.template impose_dit<0>(poisson(),psi,psi.getDomainIterator());

    timer tm_solve;
    if (init == true)
    {
        // Set BiCGStab as the method to solve the linear system
        solver.setSolver(KSPBCGS);

        // Set the absolute tolerance used to determine that the method has converged
        solver.setAbsTol(0.001);

        // Set the maximum number of iterations
        solver.setMaxIter(500);

        solver.setPreconditioner(PCHYPRE_BOOMERAMG);
        solver.setPreconditionerAMG_nl(6);
        solver.setPreconditionerAMG_maxit(1);
        solver.setPreconditionerAMG_relax("SOR/Jacobi");
        solver.setPreconditionerAMG_cycleType("V",0,4);
        solver.setPreconditionerAMG_coarsenNodalType(0);
        solver.setPreconditionerAMG_coarsen("HMIS");

        tm_solve.start();

        // Give the solver A and b; it returns x, the solution
        solver.solve(fd.getA(),x_,fd.getB());

        tm_solve.stop();
    }
    else
    {
        tm_solve.start();
        solver.solve(x_,fd.getB());
        tm_solve.stop();
    }

    // copy the solution x to the grid psi
    fd.template copy<phi>(x_,psi);

    psi.template ghost_get<phi>();

    // Correct the vorticity to make it divergence free
    auto it2 = gr.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        gr.template get<vorticity>(key)[x] -= 1.0f/2.0f/psi.spacing(x)*(psi.template get<phi>(key.move(x,1))-psi.template get<phi>(key.move(x,-1)));
        gr.template get<vorticity>(key)[y] -= 1.0f/2.0f/psi.spacing(y)*(psi.template get<phi>(key.move(y,1))-psi.template get<phi>(key.move(y,-1)));
        gr.template get<vorticity>(key)[z] -= 1.0f/2.0f/psi.spacing(z)*(psi.template get<phi>(key.move(z,1))-psi.template get<phi>(key.move(z,-1)));

        ++it2;
    }

    calc_and_print_max_div_and_int(gr);
}


void remesh(particles_type & vd, grid_type & gr, Box<3,double> & domain)
{
    // Remove all particles because we reinitialize them at grid-like positions
    vd.clear();

    // Get a grid iterator
    auto git = vd.getGridIterator(gr.getGridInfo().getSize());

    // For each grid point
    while (git.isNext())
    {
        // Get the grid point p in global coordinates; key is the corresponding local (distributed) key
        auto p = git.get();
        auto key = git.get_dist();

        // Add the particle
        vd.add();

        // Assign the position to the particle in a grid-like way
        vd.getLastPos()[x] = gr.spacing(x)*p.get(x) + domain.getLow(x);
        vd.getLastPos()[y] = gr.spacing(y)*p.get(y) + domain.getLow(y);
        vd.getLastPos()[z] = gr.spacing(z)*p.get(z) + domain.getLow(z);

        // Initialize the vorticity
        vd.template getLastProp<vorticity>()[x] = gr.template get<vorticity>(key)[x];
        vd.template getLastProp<vorticity>()[y] = gr.template get<vorticity>(key)[y];
        vd.template getLastProp<vorticity>()[z] = gr.template get<vorticity>(key)[z];

        // next grid point
        ++git;
    }

    // redistribute the particles
    vd.map();
}


void comp_vel(Box<3,double> & domain, grid_type & g_vort, grid_type & g_vel, petsc_solver<double>::return_type (& phi_s)[3], petsc_solver<double> & solver)
{
    // Convenient constant
    constexpr unsigned int phi = 0;

    // We define a field phi_f
    typedef Field<phi,poisson_nn_helm> phi_f;

    // We assemble the Poisson equation taking the Laplacian of phi with the
    // central scheme (where both derivatives use the central scheme, not
    // forward composed with backward as usual)
    typedef Lap<phi_f,poisson_nn_helm,CENTRAL_SYM> poisson;

    // Maximum size of the stencil
    Ghost<3,long int> g(2);

    // Here we create distributed grids to store the scalar right-hand side
    // and the vector potential
    grid_dist_id<3,double,aggregate<double>> gr_ps(g_vort.getDecomposition(),g_vort.getGridInfo().getSize(),g);
    grid_dist_id<3,double,aggregate<double[3]>> phi_v(g_vort.getDecomposition(),g_vort.getGridInfo().getSize(),g);

    // We calculate and print the maximum divergence of the vorticity
    // and the integral of it
    calc_and_print_max_div_and_int(g_vort);

    // For each component solve a Poisson equation
    for (size_t i = 0 ; i < 3 ; i++)
    {
        // Copy the vorticity component i into gr_ps
        auto it2 = gr_ps.getDomainIterator();

        while (it2.isNext())
        {
            auto key = it2.get();

            // copy
            gr_ps.get<vorticity>(key) = g_vort.template get<vorticity>(key)[i];

            ++it2;
        }

        // Maximum size of the stencil
        Ghost<3,long int> stencil_max(2);

        // Finite difference scheme
        FDScheme<poisson_nn_helm> fd(stencil_max, domain, gr_ps);

        // Impose the Poisson equation using gr_ps = vorticity as the right-hand side (on the full domain)
        fd.template impose_dit<phi>(poisson(),gr_ps,gr_ps.getDomainIterator());

        solver.setAbsTol(0.01);

        // Get the vector that represents the right-hand side
        Vector<double,PETSC_BASE> & b = fd.getB();

        timer tm_solve;
        tm_solve.start();

        // Give A and b to the solver; in this case we are giving
        // an initial guess phi_s calculated in the previous
        // time step
        solver.solve(phi_s[i],b);

        tm_solve.stop();

        // Calculate the residual
        solError serr;
        serr = solver.get_residual_error(phi_s[i],b);

        Vcluster & v_cl = create_vcluster();
        if (v_cl.getProcessUnitID() == 0)
        {std::cout << "Solved component " << i << " Error: " << serr.err_inf << std::endl;}

        // copy the solution to the grid
        fd.template copy<phi>(phi_s[i],gr_ps);

        // copy the solution into the i-th component of phi_v
        auto it3 = gr_ps.getDomainIterator();

        while (it3.isNext())
        {
            auto key = it3.get();

            phi_v.get<velocity>(key)[i] = gr_ps.get<phi>(key);

            ++it3;
        }
    }

    phi_v.ghost_get<phi>();

    double inv_dx = 0.5f / g_vort.spacing(0);
    double inv_dy = 0.5f / g_vort.spacing(1);
    double inv_dz = 0.5f / g_vort.spacing(2);

    auto it3 = phi_v.getDomainIterator();

    // calculate the velocity from the curl of phi
    while (it3.isNext())
    {
        auto key = it3.get();

        double phi_xy = (phi_v.get<phi>(key.move(y,1))[x] - phi_v.get<phi>(key.move(y,-1))[x])*inv_dy;
        double phi_xz = (phi_v.get<phi>(key.move(z,1))[x] - phi_v.get<phi>(key.move(z,-1))[x])*inv_dz;
        double phi_yx = (phi_v.get<phi>(key.move(x,1))[y] - phi_v.get<phi>(key.move(x,-1))[y])*inv_dx;
        double phi_yz = (phi_v.get<phi>(key.move(z,1))[y] - phi_v.get<phi>(key.move(z,-1))[y])*inv_dz;
        double phi_zx = (phi_v.get<phi>(key.move(x,1))[z] - phi_v.get<phi>(key.move(x,-1))[z])*inv_dx;
        double phi_zy = (phi_v.get<phi>(key.move(y,1))[z] - phi_v.get<phi>(key.move(y,-1))[z])*inv_dy;

        g_vel.template get<velocity>(key)[x] = phi_zy - phi_yz;
        g_vel.template get<velocity>(key)[y] = phi_xz - phi_zx;
        g_vel.template get<velocity>(key)[z] = phi_yx - phi_xy;

        ++it3;
    }
}
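
comp_vel() thus implements the vector-potential (stream-function) formulation: one Poisson solve per component with the corresponding vorticity component as right-hand side, followed by a curl,

\[
\nabla^2\phi_i \;=\; \omega_i \quad (i=x,y,z), \qquad \mathbf u \;=\; \nabla\times\boldsymbol\phi,
\]

where the sign convention of the discretized Laplacian is absorbed into \(\boldsymbol\phi\). Reusing phi_s as the initial guess exploits the smooth evolution of the flow between time steps to cut the iteration count of each solve.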


template<unsigned int prp> void set_zero(grid_type & gr)
{
    auto it = gr.getDomainGhostIterator();

    // set the property prp to zero on the domain + ghost
    while (it.isNext())
    {
        auto key = it.get();

        gr.template get<prp>(key)[0] = 0.0;
        gr.template get<prp>(key)[1] = 0.0;
        gr.template get<prp>(key)[2] = 0.0;

        ++it;
    }
}


template<unsigned int prp> void set_zero(particles_type & vd)
{
    auto it = vd.getDomainIterator();

    // set the property prp to zero on all particles
    while (it.isNext())
    {
        auto key = it.get();

        vd.template getProp<prp>(key)[0] = 0.0;
        vd.template getProp<prp>(key)[1] = 0.0;
        vd.template getProp<prp>(key)[2] = 0.0;

        ++it;
    }
}


// Calculate the right-hand side of the vorticity formulation
template<typename grid> void calc_rhs(grid & g_vort, grid & g_vel, grid & g_dwp)
{
    // useful constant
    constexpr int rhs = 0;

    // calculate several pre-factors for the finite-difference stencil
    double fac1 = 2.0f*nu/(g_vort.spacing(0));
    double fac2 = 2.0f*nu/(g_vort.spacing(1));
    double fac3 = 2.0f*nu/(g_vort.spacing(2));

    double fac4 = 0.5f/(g_vort.spacing(0));
    double fac5 = 0.5f/(g_vort.spacing(1));
    double fac6 = 0.5f/(g_vort.spacing(2));

    auto it = g_dwp.getDomainIterator();

    g_vort.template ghost_get<vorticity>();

    while (it.isNext())
    {
        auto key = it.get();

        g_dwp.template get<rhs>(key)[x] = /*fac1*(g_vort.template get<vorticity>(key.move(x,1))[x]+g_vort.template get<vorticity>(key.move(x,-1))[x])+
                                            fac2*(g_vort.template get<vorticity>(key.move(y,1))[x]+g_vort.template get<vorticity>(key.move(y,-1))[x])+
                                            fac3*(g_vort.template get<vorticity>(key.move(z,1))[x]+g_vort.template get<vorticity>(key.move(z,-1))[x])-
                                            2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[x]+*/
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[x] - g_vel.template get<velocity>(key.move(x,-1))[x])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[x] - g_vel.template get<velocity>(key.move(y,-1))[x])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[x] - g_vel.template get<velocity>(key.move(z,-1))[x]);

        g_dwp.template get<rhs>(key)[y] = /*fac1*(g_vort.template get<vorticity>(key.move(x,1))[y]+g_vort.template get<vorticity>(key.move(x,-1))[y])+
                                            fac2*(g_vort.template get<vorticity>(key.move(y,1))[y]+g_vort.template get<vorticity>(key.move(y,-1))[y])+
                                            fac3*(g_vort.template get<vorticity>(key.move(z,1))[y]+g_vort.template get<vorticity>(key.move(z,-1))[y])-
                                            2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[y]+*/
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[y] - g_vel.template get<velocity>(key.move(x,-1))[y])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[y] - g_vel.template get<velocity>(key.move(y,-1))[y])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[y] - g_vel.template get<velocity>(key.move(z,-1))[y]);

        g_dwp.template get<rhs>(key)[z] = /*fac1*(g_vort.template get<vorticity>(key.move(x,1))[z]+g_vort.template get<vorticity>(key.move(x,-1))[z])+
                                            fac2*(g_vort.template get<vorticity>(key.move(y,1))[z]+g_vort.template get<vorticity>(key.move(y,-1))[z])+
                                            fac3*(g_vort.template get<vorticity>(key.move(z,1))[z]+g_vort.template get<vorticity>(key.move(z,-1))[z])-
                                            2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[z]+*/
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[z] - g_vel.template get<velocity>(key.move(x,-1))[z])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[z] - g_vel.template get<velocity>(key.move(y,-1))[z])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[z] - g_vel.template get<velocity>(key.move(z,-1))[z]);

        ++it;
    }
}
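
With the diffusion stencil commented out, the right-hand side assembled above is the vortex-stretching term of the vorticity transport equation, discretized with central differences:

\[
\frac{D\boldsymbol\omega}{Dt} \;=\; (\boldsymbol\omega\cdot\nabla)\,\mathbf u \;+\; \nu\,\nabla^2\boldsymbol\omega
\]

Only \((\boldsymbol\omega\cdot\nabla)\mathbf u\) is active in the code (the fac4, fac5, fac6 terms); the commented block appears to be the \(\nu\,\nabla^2\boldsymbol\omega\) contribution with pre-factors fac1 to fac3.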


void rk_step1(particles_type & particles)
{
    auto it = particles.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        // Save the old vorticity
        particles.getProp<old_vort>(key)[x] = particles.getProp<vorticity>(key)[x];
        particles.getProp<old_vort>(key)[y] = particles.getProp<vorticity>(key)[y];
        particles.getProp<old_vort>(key)[z] = particles.getProp<vorticity>(key)[z];

        particles.getProp<vorticity>(key)[x] = particles.getProp<vorticity>(key)[x] + 0.5f * dt * particles.getProp<rhs_part>(key)[x];
        particles.getProp<vorticity>(key)[y] = particles.getProp<vorticity>(key)[y] + 0.5f * dt * particles.getProp<rhs_part>(key)[y];
        particles.getProp<vorticity>(key)[z] = particles.getProp<vorticity>(key)[z] + 0.5f * dt * particles.getProp<rhs_part>(key)[z];

        ++it;
    }

    auto it2 = particles.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        // Save the old position
        particles.getProp<old_pos>(key)[x] = particles.getPos(key)[x];
        particles.getProp<old_pos>(key)[y] = particles.getPos(key)[y];
        particles.getProp<old_pos>(key)[z] = particles.getPos(key)[z];

        particles.getPos(key)[x] = particles.getPos(key)[x] + 0.5f * dt * particles.getProp<p_vel>(key)[x];
        particles.getPos(key)[y] = particles.getPos(key)[y] + 0.5f * dt * particles.getProp<p_vel>(key)[y];
        particles.getPos(key)[z] = particles.getPos(key)[z] + 0.5f * dt * particles.getProp<p_vel>(key)[z];

        ++it2;
    }

    particles.map();
}


void rk_step2(particles_type & particles)
{
    auto it = particles.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        particles.getProp<vorticity>(key)[x] = particles.getProp<old_vort>(key)[x] + dt * particles.getProp<rhs_part>(key)[x];
        particles.getProp<vorticity>(key)[y] = particles.getProp<old_vort>(key)[y] + dt * particles.getProp<rhs_part>(key)[y];
        particles.getProp<vorticity>(key)[z] = particles.getProp<old_vort>(key)[z] + dt * particles.getProp<rhs_part>(key)[z];

        ++it;
    }

    auto it2 = particles.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        particles.getPos(key)[x] = particles.getProp<old_pos>(key)[x] + dt * particles.getProp<p_vel>(key)[x];
        particles.getPos(key)[y] = particles.getProp<old_pos>(key)[y] + dt * particles.getProp<p_vel>(key)[y];
        particles.getPos(key)[z] = particles.getProp<old_pos>(key)[z] + dt * particles.getProp<p_vel>(key)[z];

        ++it2;
    }

    particles.map();
}
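
Together, rk_step1() and rk_step2() advance both the particle vorticity and the particle positions with the explicit midpoint rule (second-order Runge-Kutta):

\[
\mathbf q^{\,n+1/2} = \mathbf q^{\,n} + \tfrac{\Delta t}{2}\, f(\mathbf q^{\,n}), \qquad
\mathbf q^{\,n+1} = \mathbf q^{\,n} + \Delta t\, f(\mathbf q^{\,n+1/2})
\]

where \(f\) stands for the interpolated right-hand side (for the vorticity) or velocity (for the positions), re-evaluated at the midpoint by the second do_step() call in the main loop. The map() at the end migrates particles that crossed processor boundaries.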


template<typename grid, typename vector> void do_step(vector & particles,
                                                      grid & g_vort,
                                                      grid & g_vel,
                                                      grid & g_dvort,
                                                      Box<3,double> & domain,
                                                      interpolation<vector,grid,mp4_kernel<double>> & inte,
                                                      petsc_solver<double>::return_type (& phi_s)[3],
                                                      petsc_solver<double> & solver)
{
    constexpr int rhs = 0;

    // particle to mesh: interpolate the particle vorticity onto the grid
    set_zero<vorticity>(g_vort);
    inte.template p2m<vorticity,vorticity>(particles,g_vort);

    g_vort.template ghost_put<add_,vorticity>();

    // Calculate velocity from vorticity
    comp_vel(domain,g_vort,g_vel,phi_s,solver);
    calc_rhs(g_vort,g_vel,g_dvort);

    // mesh to particle: interpolate the right-hand side and the velocity back onto the particles
    g_dvort.template ghost_get<rhs>();
    g_vel.template ghost_get<velocity>();
    inte.template m2p<rhs,rhs_part>(g_dvort,particles);
    inte.template m2p<velocity,p_vel>(g_vel,particles);
}
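
The p2m/m2p calls interpolate with the M'4 kernel pulled in by interpolation/mp4_kernel.hpp. For reference, its commonly used one-dimensional weight for a normalized distance \(s = |x|/h\) is (the authoritative definition lives in that header):

\[
W(s) \;=\; \begin{cases} 1 - \tfrac{5}{2}s^2 + \tfrac{3}{2}s^3 & 0 \le s < 1 \\ \tfrac{1}{2}\,(2-s)^2(1-s) & 1 \le s < 2 \\ 0 & s \ge 2 \end{cases}
\]

Each particle therefore interacts with 4 grid points per dimension, consistent with the ghost layers of size 2 used for the grids.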

template<typename vector, typename grid> void check_point_and_save(vector & particles,
                                                                   grid & g_vort,
                                                                   grid & g_vel,
                                                                   grid & g_dvort,
                                                                   size_t i)
{
    Vcluster & v_cl = create_vcluster();

    if (v_cl.getProcessingUnits() < 24)
    {
        particles.write("part_out_" + std::to_string(i),VTK_WRITER | FORMAT_BINARY);
        g_vort.template ghost_get<vorticity>();
        g_vel.template ghost_get<velocity>();
        g_vel.write("grid_velocity_" + std::to_string(i), VTK_WRITER | FORMAT_BINARY);
        g_vort.write("grid_vorticity_" + std::to_string(i), VTK_WRITER | FORMAT_BINARY);
    }

    // In order to reduce the size of the saved data we apply a threshold:
    // we only save particles with vorticity magnitude higher than 0.1
    vector_dist<3,double,aggregate<double>> part_save(particles.getDecomposition(),0);

    auto it_s = particles.getDomainIterator();

    while (it_s.isNext())
    {
        auto p = it_s.get();

        double vort_magn = sqrt(particles.template getProp<vorticity>(p)[0] * particles.template getProp<vorticity>(p)[0] +
                                particles.template getProp<vorticity>(p)[1] * particles.template getProp<vorticity>(p)[1] +
                                particles.template getProp<vorticity>(p)[2] * particles.template getProp<vorticity>(p)[2]);

        if (vort_magn < 0.1)
        {
            ++it_s;
            continue;
        }

        part_save.add();

        part_save.getLastPos()[0] = particles.getPos(p)[0];
        part_save.getLastPos()[1] = particles.getPos(p)[1];
        part_save.getLastPos()[2] = particles.getPos(p)[2];

        part_save.template getLastProp<0>() = vort_magn;

        ++it_s;
    }

    part_save.map();

    // Save an HDF5 file for checkpoint restart
    particles.save("check_point");
}

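The numbered steps referenced in the comments of main() below come from the example's accompanying write-up, which is elided from this listing. Reconstructed from the code, they are roughly: step 1 initialize the vortex ring on the grid; step 2 Helmholtz-Hodge projection; step 3 create the particles by remeshing; steps 4-7 interpolate the vorticity particle-to-mesh, compute the velocity from the vorticity, evaluate the right-hand side, and interpolate both back mesh-to-particle (do_step); step 8 the first Runge-Kutta stage (rk_step1); steps 9-12 repeat steps 4-7 at the midpoint; step 13 the second Runge-Kutta stage (rk_step2); step 14 interpolate the vorticity back onto the grid, re-project, and remesh.
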
int main(int argc, char* argv[])
{
    // Initialize
    openfpm_init(&argc,&argv);
    {
        // Domain, a rectangle
        Box<3,double> domain({0.0,0.0,0.0},{22.0,5.57,5.57});

        // Ghost (Not important in this case but required)
        Ghost<3,long int> g(2);

        // Grid points: 512 on x, 64 on y, 64 on z
        long int sz[] = {512,64,64};
        size_t szu[] = {(size_t)sz[0],(size_t)sz[1],(size_t)sz[2]};

        periodicity<3> bc = {{PERIODIC,PERIODIC,PERIODIC}};

        grid_type g_vort(szu,domain,g,bc);
        grid_type g_vel(g_vort.getDecomposition(),szu,g);
        grid_type g_dvort(g_vort.getDecomposition(),szu,g);
        particles_type particles(g_vort.getDecomposition(),0);

        // They store the solutions used to compute the velocity;
        // they are used as initial guesses every time we call the solver
        petsc_solver<double>::return_type phi_s[3];
        petsc_solver<double>::return_type x_;

        // Parallel object
        Vcluster & v_cl = create_vcluster();

        // print out the spacing of the grid
        if (v_cl.getProcessUnitID() == 0)
        {std::cout << "SPACING " << g_vort.spacing(0) << " " << g_vort.spacing(1) << " " << g_vort.spacing(2) << std::endl;}

        // initialize the ring (step 1)
        init_ring(g_vort,domain);

        x_.resize(g_vort.size(),g_vort.getLocalDomainSize());
        x_.setZero();

        // Create a PETSc solver to get the solution x
        petsc_solver<double> solver;

        // Do the Helmholtz projection (step 2)
        helmotz_hodge_projection(g_vort,domain,solver,x_,true);

        // initialize the particles on a mesh (step 3)
        remesh(particles,g_vort,domain);

        // Get the total number of particles
        size_t tot_part = particles.size_local();
        v_cl.sum(tot_part);
        v_cl.execute();

        // Now we set the size of phi_s
        phi_s[0].resize(g_vort.size(),g_vort.getLocalDomainSize());
        phi_s[1].resize(g_vort.size(),g_vort.getLocalDomainSize());
        phi_s[2].resize(g_vort.size(),g_vort.getLocalDomainSize());

        // And we initially set it to zero
        phi_s[0].setZero();
        phi_s[1].setZero();
        phi_s[2].setZero();

        // We create the interpolation object outside the loop
        // to avoid recreating it at every cycle. Internally this object
        // creates some data structures to optimize the mapping from particle
        // position to sub-domain. If there is no re-balancing it is safe
        // to reuse it.
        interpolation<particles_type,grid_type,mp4_kernel<double>> inte(particles,g_vort);

        // With more than 24 cores we rely on the HDF5 checkpoint restart file
        if (v_cl.getProcessingUnits() < 24)
        {g_vort.write("grid_vorticity_init", VTK_WRITER | FORMAT_BINARY);}

        // Before entering the loop we check if we want to restart from
        // a previous simulation
        size_t i = 0;

        // if we have an argument
        if (argc > 1)
        {
            // take that argument
            std::string restart(argv[1]);

            // convert it into a number
            i = std::stoi(restart);

            // load the file
            particles.load("check_point_" + std::to_string(i));

            // Print to inform that we are restarting from a
            // particular position
            if (v_cl.getProcessUnitID() == 0)
            {std::cout << "Restarting from " << i << std::endl;}
        }

        // Time Integration
        for ( ; i < 10001 ; i++)
        {
            // do steps 4-5-6-7
            do_step(particles,g_vort,g_vel,g_dvort,domain,inte,phi_s,solver);

            // do step 8
            rk_step1(particles);

            // do steps 9-10-11-12
            do_step(particles,g_vort,g_vel,g_dvort,domain,inte,phi_s,solver);

            // do step 13
            rk_step2(particles);

            // do step 14
            set_zero<vorticity>(g_vort);
            inte.template p2m<vorticity,vorticity>(particles,g_vort);
            g_vort.template ghost_put<add_,vorticity>();

            // Helmholtz-Hodge projection
            helmotz_hodge_projection(g_vort,domain,solver,x_,false);

            remesh(particles,g_vort,domain);

            // print the step number
            if (v_cl.getProcessUnitID() == 0)
            {std::cout << "Step " << i << std::endl;}

            // every 100 steps write the output
            if (i % 100 == 0) {check_point_and_save(particles,g_vort,g_vel,g_dvort,i);}
        }
    }

    openfpm_finalize();
}


#else

int main(int argc, char* argv[])
{
    return 0;
}

#endif