OpenFPM 5.2.0
Project that contains the implementation of distributed structures
sphereR1_EucLap.cpp
// ------------------------------------------------------------------------------------------
// ActiveGelsOnDeformableSurfaces
// Copyright (C) 2020 ---- foggia
//
// This file is part of the ActiveGelsOnDeformableSurfaces software.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// ------------------------------------------------------------------------------------------
//
// Created by foggia on 22-04-12
//

// Include standard library files
#include <iostream>
#include <iomanip> // std::setprecision (used when writing the timing files)
#include <array>

// Include OpenFPM files
#include "Grid/grid_dist_id.hpp"
#include "Vector/vector_dist.hpp"
#include "FiniteDifference/FD_op.hpp"
// Closest-point routines (estimateClosestPoint, extendLSField); header path assumed
#include "level_set/closest_point/closest_point.hpp"
#include <util/PathsAndFiles.hpp>
#include "interpolation/interpolation.hpp"
#include "interpolation/mp4_kernel.hpp"

// Include own files
#include "auxFunc.hpp"
#include "spatial_discretization/finite_difference/derivative.hpp"
#include "interp_kernel/m4_kernel.hpp"

// ------------------------------------------------------------------------------------------
// Main function
// ------------------------------------------------------------------------------------------

int main(int argc, char * argv[]) {

  openfpm_init(&argc,&argv);
  Vcluster<> & v_cl = create_vcluster();

  timer ttotal, tcp, tExtQ, tExtRHS, tLap, tAll;
  const size_t mpiSize{v_cl.size()};

  const int DIM{3};
  const int SDF{0};
  const int CP{1};
  const int QTY{2};
  const int ANALYTLB{3};
  const int NUMLB{4};
  const int ABSERR{5};
  const int TMP_EXT_SCAL{6};

  const int NUMLB_PART{0};
  const int ANALYTLB_PART{1};
  const int ABSERR_PART{2};
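  // Indices of the grid properties (named in GpropNames below) and, in the *_PART group,
  // of the particle properties (PpropNames).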

  // Writing directory
  std::string cwd{boost::filesystem::current_path().string()};
  std::string output_dir{cwd + "/output_sphereR1_CartLap"};
  create_directory_if_not_exist(output_dir);
  std::string g_output;
  std::ofstream norms_file, normsParticles_file, timesCPU_file, timesWCT_file;
  if (v_cl.rank() == 0) {
    norms_file.open(output_dir + "/norms_EucLap_mpiSize_" + std::to_string(mpiSize) + ".csv", std::ios_base::app);
    normsParticles_file.open(output_dir + "/normsParticles_EucLap_mpiSize_" + std::to_string(mpiSize) + ".csv", std::ios_base::app);
    timesCPU_file.open(output_dir + "/timesCPU_EucLap_mpiSize_" + std::to_string(mpiSize) + ".csv", std::ios_base::app);
    timesWCT_file.open(output_dir + "/timesWCT_EucLap_mpiSize_" + std::to_string(mpiSize) + ".csv", std::ios_base::app);
    if (norms_file.is_open() == false or normsParticles_file.is_open() == false or timesCPU_file.is_open() == false or timesWCT_file.is_open() == false) {
      std::cout << "ERROR: Could not open output file(s).\n";
      return 1;
    }
    norms_file << "#N L2 Linf" << std::endl;
    normsParticles_file << "#N L2 Linf" << std::endl;
    timesCPU_file << "#N tcp tExtQ tAll tExtRHS tLap ttotal" << std::endl;
    timesWCT_file << "#N tcp tExtQ tAll tExtRHS tLap ttotal" << std::endl;
  }

  // Surface
  const double radius{1.0};
  const std::array<double,DIM> center{0.0,0.0,0.0};
  const int nb_width{10};
  const int nb_half_width{nb_width/2};
  const int POLYORD{3};
  const int POLYORD_EXT{3};
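  // POLYORD is the polynomial order used for the closest-point estimation; the field
  // extensions below use order 3, passed as a literal to extendLSField.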

  // Domain
  // double boxP1{-1.3}, boxP2{1.3};
  double boxP1{-2.0}, boxP2{2.0};
  double boxSize{boxP2-boxP1};
  Box<DIM,double> domain{{boxP1,boxP1,boxP1},{boxP2,boxP2,boxP2}};
  Ghost<DIM,long int> ghost{20};
  periodicity<DIM> bc{{NON_PERIODIC,NON_PERIODIC,NON_PERIODIC}};

  // Grid (typedefs reconstructed; property layout assumed to match GpropNames below)
  typedef aggregate<double,double[DIM],double,double,double,double,double> prop_grid;
  typedef grid_dist_id<DIM,double,prop_grid> grid_type;
  openfpm::vector<std::string> GpropNames{"phiSDF","cp","qty","analytLB","numLB","absErr","tmp_ext_scal"};

  // Particles on surface
  typedef aggregate<double,double,double> prop_part;
  openfpm::vector<std::string> PpropNames{"numLB","analytLB","absErr"};
  typedef vector_dist<DIM,double,prop_part> vector_type; // reconstructed typedef

  std::vector<grid_dist_key_dx<grid_type::dims,grid_key_dx<grid_type::dims>>> nb_keys, nb_keys_error;

  int n[7] = {73,87,110,140,178,211,250};
  int n_part[7] = {3600,6000,12000,24000,48000,80000,128000};
  // int n[7] = {23,31,41,49,59,67,77};
  // int n[2] = {41,53};
  // int n[6] = {21,41,81,161,321,641};

  // Loop over different grid sizes
  for (int i = 6; i < 7; ++i) {

    ttotal.reset();
    tcp.reset();
    tExtQ.reset();
    tExtRHS.reset();
    tAll.reset();
    tLap.reset();

    g_output = {output_dir + "/sphereR1_SurfLap_" + std::to_string(n[i]) + "_mpiSize_" + std::to_string(mpiSize)};

    // Grid creation
    size_t sz[DIM] = {(size_t)n[i],(size_t)n[i],(size_t)n[i]};
    grid_type grid{sz,domain,ghost,bc};
    grid.setPropNames(GpropNames);

    // Particle creation
    vector_type particlesSurf{grid.getDecomposition(),0};
    particlesSurf.setPropNames(PpropNames);

    std::array<double,DIM> coord;
    // const int n_part{4*n[i]};
    if (v_cl.rank() == 0) {

      // Created using the Fibonacci sphere algorithm
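      // (points advance by the golden angle in azimuth and by equal steps in z, which gives a
      // nearly uniform covering of the unit sphere). The analytical value stored below is the
      // Laplace-Beltrami of the spherical harmonic Y_4^0 = 3/(16*sqrt(pi)) * (35*cos^4(theta) - 30*cos^2(theta) + 3);
      // on the unit sphere its eigenvalue is -l(l+1) = -20, hence the factor -20.0*prefactor.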
      const double pi{3.14159265358979323846};
      const double golden_ang = pi*(3.0 - std::sqrt(5.0));
      const double prefactor{3.0/16.0 * std::sqrt(1.0/pi)};
      double rad, theta, arg, thetaB, cos2;

      for (int j = 0; j < n_part[i]; ++j) {

        coord[1] = 1.0 - 2.0*(j/double(n_part[i]-1));
        rad = std::sqrt(1.0 - (coord[1]-center[1])*(coord[1]-center[1]));
        theta = golden_ang * j;
        coord[0] = std::cos(theta) * rad;
        coord[2] = std::sin(theta) * rad;

        arg = (coord[0]-center[0]) * (coord[0]-center[0]) + (coord[1]-center[1]) * (coord[1]-center[1]);
        thetaB = std::atan2(std::sqrt(arg),(coord[2]-center[2]));
        cos2 = std::cos(thetaB)*std::cos(thetaB);

        particlesSurf.add();
        particlesSurf.getLastPos()[0] = coord[0];
        particlesSurf.getLastPos()[1] = coord[1];
        particlesSurf.getLastPos()[2] = coord[2];
        particlesSurf.getLastProp<ANALYTLB_PART>() = -20.0 * prefactor * (cos2*(35.0*cos2 - 30.0) + 3.0);
      }
    }
    particlesSurf.map();

    if (v_cl.rank() == 0)
      std::cout << "n: " << n[i]
                << " dx: " << grid.spacing(0)
                << std::endl;

    // SDF and normal
    init_surface<grid_type,SDF>(grid,center,radius);
    grid.template ghost_get<SDF>();

    // NB indices
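    // Narrow-band node lists: nb_keys holds the nodes within 7*dx of the zero level set
    // (computation band), nb_keys_error the nodes within 2*dx used for the error norms.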
    nb_keys.clear();
    nb_keys_error.clear();
    get_NB_indices<grid_type,SDF>(grid,7*grid.spacing(0),nb_keys);
    get_NB_indices<grid_type,SDF>(grid,2*grid.spacing(0),nb_keys_error);

    // Analytical solution
    init_analytSol<grid_type,QTY,ANALYTLB>(grid,center,radius);

    // grid.write_frame(g_output,0,VTK_WRITER);
    // openfpm_finalize();
    // return 0;
    // -------------------------------------------------------------
    ttotal.start();

    // 1) CP representation
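    // For every grid node within 4*dx of the surface, estimateClosestPoint fits a polynomial
    // interpolant of the SDF (order POLYORD) and stores the node's closest point on the
    // zero level set in the CP property.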
    tcp.start();
    estimateClosestPoint<SDF,CP,POLYORD>(grid,4*grid.spacing(0));
    grid.template ghost_get<CP>();
    tcp.stop();

    tAll.start();
    // 2) Quantity + extension
    init_qty<grid_type,QTY>(grid,center,nb_keys);
    grid.template ghost_get<QTY>();
    tExtQ.start();
    extendLSField<SDF,CP,QTY,TMP_EXT_SCAL,3>(grid,4*grid.spacing(0));
    grid.template ghost_get<QTY>();
    tExtQ.stop();
    // init_qty<grid_type,QTY>(grid,center);
    // grid.template ghost_get<QTY>();

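    // extendLSField extends a field off the surface so that it is constant along the normal
    // direction (values taken at the closest points). For such an extension, the Euclidean
    // Laplacian evaluated on the surface coincides with the Laplace-Beltrami operator, which
    // is why a plain Cartesian stencil is sufficient in step 3 below.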
    // 3) Euclidean Laplacian
    tLap.start();
    auto it = grid.getDomainIterator();
    while(it.isNext()) {
      auto key = it.get();
      grid.template get<NUMLB>(key) = second_order_deriv2<grid_type,QTY>(grid,key,0) + second_order_deriv2<grid_type,QTY>(grid,key,1) + second_order_deriv2<grid_type,QTY>(grid,key,2);
      ++it;
    }
    tLap.stop();
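    // (The commented-out block below is the equivalent formulation using OpenFPM's FD::
    // central-difference operators: numLB = Dxx(qty) + Dyy(qty) + Dzz(qty).)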
    // FD::Derivative<0,2,2,FD::CENTRAL> Dxx;
    // FD::Derivative<1,2,2,FD::CENTRAL> Dyy;
    // FD::Derivative<2,2,2,FD::CENTRAL> Dzz;
    // auto qty{FD::getV<QTY>(grid)};
    // auto numLB{FD::getV<NUMLB>(grid)};
    // tLap.start();
    // numLB = Dxx(qty) + Dyy(qty) + Dzz(qty);
    // tLap.stop();

    // 4) Extend Laplacian
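    // The Laplacian is extended off the surface as well, so that the grid nodes surrounding
    // each surface particle carry values consistent with the on-surface result before the
    // interpolation step below.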
    tExtRHS.start();
    grid.template ghost_get<NUMLB>();
    extendLSField<SDF,CP,NUMLB,TMP_EXT_SCAL,3>(grid,4*grid.spacing(0));
    grid.template ghost_get<NUMLB>();
    tExtRHS.stop();

    tAll.stop();

    ttotal.stop();
    // -------------------------------------------------------------

    std::cout << "n: " << n[i] << " h: " << grid.spacing(0) << std::endl;

    // Error on grid
    L_norms abs_norm_nb;
    get_absolute_error<grid_type,NUMLB,ANALYTLB,ABSERR>(grid,nb_keys_error);
    abs_norm_nb = get_l_norms_NB<grid_type,ABSERR>(grid,nb_keys_error);
    write_lnorms_to_file(n[i],abs_norm_nb,"norms_EucLap_mpiSize_" + std::to_string(mpiSize),output_dir);

    // Interpolate to particles
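    // The numerical Laplacian is interpolated from the grid onto the surface particles (m2p);
    // the target particle property is zeroed first so that it holds only interpolated values.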
    typedef interpolate<vector_type,grid_type,mp4_kernel<double>> interp_type; // reconstructed typedef; MP4 kernel assumed
    interp_type interpSurf(particlesSurf,grid);
    set_prop2zero<vector_type,0>(particlesSurf);
    interpSurf.template m2p<NUMLB,NUMLB_PART>(grid,particlesSurf);

    // Error on particles
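    // Pointwise absolute error on the particles; the squared errors and the maximum are
    // reduced across ranks to give L2 = sqrt((1/N) * sum_i e_i^2) and Linf = max_i |e_i|.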
    double maxError{0}, sumErrorSq{0};
    auto pit{particlesSurf.getDomainIterator()};
    while(pit.isNext()) {
      auto key{pit.get()};

      particlesSurf.template getProp<ABSERR_PART>(key) = std::fabs(particlesSurf.template getProp<ANALYTLB_PART>(key) - particlesSurf.template getProp<NUMLB_PART>(key));
      sumErrorSq += particlesSurf.template getProp<ABSERR_PART>(key)*particlesSurf.template getProp<ABSERR_PART>(key);
      maxError = std::max(maxError,particlesSurf.template getProp<ABSERR_PART>(key));

      ++pit;
    }
    v_cl.sum(sumErrorSq);
    v_cl.max(maxError);
    v_cl.execute();
    abs_norm_nb.l2 = {std::sqrt(sumErrorSq / (double)n_part[i])};
    abs_norm_nb.linf = maxError;
    write_lnorms_to_file(n_part[i],abs_norm_nb,"normsParticles_EucLap_mpiSize_" + std::to_string(mpiSize),output_dir);

    // Time computation
    double tcp_cpu{tcp.getcputime()}, tExtQ_cpu{tExtQ.getcputime()}, tExtRHS_cpu{tExtRHS.getcputime()}, tLap_cpu{tLap.getcputime()}, tAll_cpu{tAll.getcputime()}, ttotal_cpu{ttotal.getcputime()};
    double tcp_wct{tcp.getwct()}, tExtQ_wct{tExtQ.getwct()}, tExtRHS_wct{tExtRHS.getwct()}, tLap_wct{tLap.getwct()}, tAll_wct{tAll.getwct()}, ttotal_wct{ttotal.getwct()};
    v_cl.sum(tcp_cpu);
    v_cl.sum(tExtQ_cpu);
    v_cl.sum(tExtRHS_cpu);
    v_cl.sum(tLap_cpu);
    v_cl.sum(tAll_cpu);
    v_cl.sum(tcp_wct);
    v_cl.sum(tExtQ_wct);
    v_cl.sum(tExtRHS_wct);
    v_cl.sum(tLap_wct);
    v_cl.sum(tAll_wct);
    v_cl.sum(ttotal_cpu);
    v_cl.sum(ttotal_wct);

    v_cl.execute();
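    // The reduced timings are divided by the number of ranks when written below, so the CSV
    // files contain average per-rank CPU and wall-clock times.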
    // grid.write_frame(g_output,0,VTK_WRITER);

    if (v_cl.rank() == 0) {
      timesCPU_file << n[i] << "," << std::setprecision(6) << std::scientific << tcp_cpu/v_cl.size() << "," << tExtQ_cpu/v_cl.size() << "," << tAll_cpu/v_cl.size() << "," << tExtRHS_cpu/v_cl.size() << "," << tLap_cpu/v_cl.size() << "," << ttotal_cpu/v_cl.size() << std::endl;
      timesWCT_file << n[i] << "," << std::setprecision(6) << std::scientific << tcp_wct/v_cl.size() << "," << tExtQ_wct/v_cl.size() << "," << tAll_wct/v_cl.size() << "," << tExtRHS_wct/v_cl.size() << "," << tLap_wct/v_cl.size() << "," << ttotal_wct/v_cl.size() << std::endl;
    }
  }

  if (v_cl.rank() == 0) {
    norms_file.close();
    normsParticles_file.close();
    timesCPU_file.close();
    timesWCT_file.close();
  }

  openfpm_finalize();
  return 0;
}