OpenFPM  5.2.0
Project that contains the implementation of distributed structures
DCPSE_op_subset_test.cu
1 /*
2  * DCPSE_op_subset_test.cu
3  *
4  * Created on: May 15, 2020
5  * Author: Abhinav Singh
6  *
7  */
8 #include "config.h"
9 #ifdef HAVE_EIGEN
10 #ifdef HAVE_PETSC
11 
12 
13 #define BOOST_TEST_DYN_LINK
14 
15 #include "util/util_debug.hpp"
16 #include <boost/test/unit_test.hpp>
17 #include <iostream>
18 #include "DCPSE/DCPSE_op/DCPSE_op.hpp"
19 #include "DCPSE/DCPSE_op/DCPSE_Solver.hpp"
20 #include "DCPSE/DCPSE_op/DCPSE_Solver.cuh"
21 #include "Operators/Vector/vector_dist_operators.hpp"
22 #include "Vector/vector_dist_subset.hpp"
23 #include "DCPSE/DCPSE_op/EqnsStruct.hpp"
24 
25 //template<typename T>
26 //struct Debug;
27 
28 #if 0
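// NOTE: everything from here to the matching "#endif //if 0" at the bottom of the
// file is compiled out by this #if 0 guard; the test suite below is currently disabled.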
29 BOOST_AUTO_TEST_SUITE(dcpse_op_subset_suite_tests_cu)
30 
31  BOOST_AUTO_TEST_CASE(dcpse_op_subset_tests) {
32  size_t edgeSemiSize = 40;
33  const size_t sz[2] = {2 * edgeSemiSize, 2 * edgeSemiSize};
34  Box<2, double> box({0, 0}, {1.0,1.0});
35  size_t bc[2] = {NON_PERIODIC, NON_PERIODIC};
36  double spacing[2];
37  spacing[0] = 1.0 / (sz[0] - 1);
38  spacing[1] = 1.0 / (sz[1] - 1);
39  double rCut = 3.9 * spacing[0];
40  int ord = 2;
41  double sampling_factor = 4.0;
42  Ghost<2, double> ghost(rCut);
43  BOOST_TEST_MESSAGE("Init vector_dist...");
44  double sigma2 = spacing[0] * spacing[1] / (2 * 4);
45 
46  vector_dist_ws_gpu<2, double, aggregate<double, double, double, VectorS<2, double>>> Particles(0, box,
47  bc,
48  ghost);
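// vector_dist_ws_gpu is a distributed particle set that additionally carries a
// per-particle subset label; below, label 0 is used for bulk particles and
// label 1 for boundary particles.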
49 
50  //Init_DCPSE(Particles)
51  BOOST_TEST_MESSAGE("Init Particles...");
52  std::mt19937 rng{6666666};
53 
54  std::normal_distribution<> gaussian{0, sigma2};
55 
56 // openfpm::vector<aggregate<int>> bulk;
57 // openfpm::vector<aggregate<int>> boundary;
58 
59  auto it = Particles.getGridIterator(sz);
60  size_t pointId = 0;
61  size_t counter = 0;
62  double minNormOne = 999;
63  while (it.isNext())
64  {
65  Particles.add();
66  auto key = it.get();
67  mem_id k0 = key.get(0);
68  double x = k0 * spacing[0];
69  Particles.getLastPos()[0] = x;//+ gaussian(rng);
70  mem_id k1 = key.get(1);
71  double y = k1 * spacing[1];
72  Particles.getLastPos()[1] = y;//+gaussian(rng);
73  // Here fill the function value
74  Particles.template getLastProp<0>() = sin(Particles.getLastPos()[0]) + sin(Particles.getLastPos()[1]);
75 
76  if (k0 != 0 && k1 != 0 && k0 != sz[0] -1 && k1 != sz[1] - 1)
77  {
78 // bulk.add();
79 // bulk.template get<0>(bulk.size()-1) = Particles.size_local() - 1;
80 
81  Particles.getLastSubset(0);
82  }
83  else
84  {
85 // boundary.add();
86 // boundary.template get<0>(boundary.size()-1) = Particles.size_local() - 1;
87  Particles.getLastSubset(1);
88  }
89 
90 
91  ++counter;
92  ++it;
93  }
94  BOOST_TEST_MESSAGE("Sync Particles across processors...");
95 
96  Particles.map();
97  Particles.ghost_get<0>();
98 
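// Poison prop<0> on the received ghost particles with NaN: if a DCPSE operator
// evaluated below includes ghost particles in its support, the NaN propagates
// into the result, which is what the multi-processor check further down
// requires (is_nan == true when more than one rank is used).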
99  auto git = Particles.getGhostIterator();
100 
101  while (git.isNext())
102  {
103  auto p = git.get();
104 
105  Particles.template getProp<0>(p) = std::numeric_limits<double>::quiet_NaN();
106 
107  ++git;
108  }
109 
110  vector_dist_subset_gpu<2, double, aggregate<double, double, double, VectorS<2, double>>> Particles_bulk(Particles, 0);
111  vector_dist_subset_gpu<2, double, aggregate<double, double, double, VectorS<2, double>>> Particles_boundary(Particles, 1);
112  auto & boundary = Particles_boundary.getIds();
113 
114  // Get expression views of the particle properties
115  auto P = getV<0>(Particles);
116  auto Out = getV<1>(Particles);
117  auto Pb = getV<2>(Particles);
118  auto Out_V = getV<3>(Particles);
119 
120 
121  auto P_bulk = getV<2>(Particles_bulk);
122  auto Out_bulk = getV<1>(Particles_bulk);
123  auto Out_V_bulk = getV<3>(Particles_bulk);
124  Out=10;
125  P_bulk = 5;
126 
127  P_bulk=Pb+Out;
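// Expressions obtained through a subset view read from the full particle set but
// write only to the particles of that subset: Out is 10 everywhere, P_bulk sets
// prop<2> to 5 on the bulk, so Pb + Out leaves prop<2> equal to 15 on bulk
// particles only (this is the value checked at the end of the test).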
128 // Particles.write("Test_output_subset");
129 
130  // Create the subset
131 
132  /* Derivative_x Dx(Particles, 2, rCut);
133  Derivative_y Dy(Particles, 2, rCut);
134  Derivative_x Dx_bulk(Particles_bulk, 2, rCut);
135 */
136  Derivative_x_gpu Dx_bulk(Particles_bulk, 2, rCut, support_options::RADIUS);
137  Derivative_y_gpu Dy_bulk(Particles_bulk, 2, rCut, support_options::RADIUS);
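// DCPSE derivative operators built on the bulk subset; support_options::RADIUS
// selects the operator support by the cutoff radius rCut.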
138 
139  Out_bulk = Dx_bulk(P);
140  Out_V_bulk[0] = P + Dx_bulk(P);
141  Out_V_bulk[1] = Out_V[0] +Dy_bulk(P);
142 
143  // Check
144  bool is_nan = false;
145 
146  auto & v_cl = create_vcluster();
147  if (v_cl.size() > 1)
148  {
149  auto it2 = Particles_bulk.getDomainIterator();
150  while (it2.isNext())
151  {
152  auto p = it2.get();
153 
154  /* BOOST_REQUIRE_EQUAL(Particles_bulk.getProp<2>(p),15.0);
155  BOOST_REQUIRE(fabs(Particles_bulk.getProp<1>(p) - cos(Particles_bulk.getPos(p)[0])) < 0.005 );
156  BOOST_REQUIRE(fabs(Particles_bulk.getProp<3>(p)[0] - Particles_bulk.getProp<0>(p) - cos(Particles_bulk.getPos(p)[0])) < 0.001 );
157  BOOST_REQUIRE(fabs(Particles_bulk.getProp<3>(p)[1] - Particles_bulk.getProp<3>(p)[0] - cos(Particles_bulk.getPos(p)[1])) < 0.001 );*/
158 
159  is_nan |= std::isnan(Particles_bulk.template getProp<1>(p));
160 
161  // Particles_bulk.template getProp<0>(p) = fabs(Particles_bulk.getProp<1>(p) - cos(Particles_bulk.getPos(p)[0]));
162 
163  ++it2;
164  }
165 
166  BOOST_REQUIRE_EQUAL(is_nan,true);
167  }
168 
169 // P_bulk = Dx_bulk(P_bulk); <------------ Incorrect: produces an error message
170 // P = Dx_bulk(P); <------- Incorrect: produces an overflow
171 
172  Particles.ghost_get<0>();
173 
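// Overwrite prop<0> on the boundary particles with NaN before re-evaluating the
// bulk operators: the assertions that follow only hold if boundary particles do
// not enter the supports of the bulk-subset derivatives, otherwise the NaN would
// leak into the results.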
174  for (int i = 0 ; i < boundary.size() ; i++)
175  {
176  Particles.template getProp<0>(boundary.template get<0>(i)) = std::numeric_limits<double>::quiet_NaN();
177  }
178 
179  Particles.ghost_get<0>();
180 
181  Out_bulk = Dx_bulk(P);
182  Out_V_bulk[0] = P + Dx_bulk(P);
183  Out_V_bulk[1] = Out_V[0] +Dy_bulk(P);
184 
185  auto it2 = Particles_bulk.getDomainIterator();
186  while (it2.isNext())
187  {
188  auto p = it2.get();
189 
190  BOOST_REQUIRE_EQUAL(Particles_bulk.getProp<2>(p),15.0);
191  BOOST_REQUIRE(fabs(Particles_bulk.getProp<1>(p) - cos(Particles_bulk.getPos(p)[0])) < 0.005 );
192  BOOST_REQUIRE(fabs(Particles_bulk.getProp<3>(p)[0] - Particles_bulk.getProp<0>(p) - cos(Particles_bulk.getPos(p)[0])) < 0.001 );
193  BOOST_REQUIRE(fabs(Particles_bulk.getProp<3>(p)[1] - Particles_bulk.getProp<3>(p)[0] - cos(Particles_bulk.getPos(p)[1])) < 0.001 );
194 
195  ++it2;
196  }
197 
198 
199 
200  }
201 
202  BOOST_AUTO_TEST_CASE(dcpse_op_subset_PC_lid) {
203 // int rank;
204 // MPI_Comm_rank(MPI_COMM_WORLD, &rank);
205  constexpr int x = 0;
206  constexpr int y = 1;
207  size_t edgeSemiSize = 20;
208  const size_t sz[2] = {2 * edgeSemiSize+1, 2 * edgeSemiSize+1};
209  Box<2, double> box({0, 0}, {1.0,1.0});
210  size_t bc[2] = {NON_PERIODIC, NON_PERIODIC};
211  double spacing[2];
212  spacing[0] = 1.0 / (sz[0] - 1);
213  spacing[1] = 1.0 / (sz[1] - 1);
214  double rCut = 3.9 * spacing[0];
215  int ord = 2;
216  double sampling_factor = 4.0;
217  Ghost<2, double> ghost(rCut);
218  BOOST_TEST_MESSAGE("Init vector_dist...");
219  double sigma2 = spacing[0] * spacing[1] / (2 * 4);
220  auto &v_cl = create_vcluster();
221 
222  typedef aggregate<double, VectorS<2, double>, VectorS<2, double>, VectorS<2, double>, double, VectorS<2, double>, VectorS<2, double>, double> particle_type;
223 
224  vector_dist_ws_gpu<2, double, particle_type> Particles(0, box,bc,ghost);
225 
226  //Init_DCPSE(Particles)
227  BOOST_TEST_MESSAGE("Init Particles...");
228 
229 // openfpm::vector<aggregate<int>> bulk;
230 // openfpm::vector<aggregate<int>> boundary;
231 
232  auto it = Particles.getGridIterator(sz);
233  while (it.isNext())
234  {
235  Particles.add();
236  auto key = it.get();
237  mem_id k0 = key.get(0);
238  double xp0 = k0 * spacing[0];
239  Particles.getLastPos()[0] = xp0;
240  mem_id k1 = key.get(1);
241  double yp0 = k1 * spacing[1];
242  Particles.getLastPos()[1] = yp0;
243  ++it;
244  }
245  BOOST_TEST_MESSAGE("Sync Particles across processors...");
246  Particles.map();
247  Particles.ghost_get<0>();
248 
249  auto it2 = Particles.getDomainIterator();
250  while (it2.isNext()) {
251  auto p = it2.get();
252  Point<2, double> xp = Particles.getPos(p);
253  if (xp[0] != 0 && xp[1] != 0 && xp[0] != 1.0 && xp[1] != 1.0) {
254 // bulk.add();
255 // bulk.last().get<0>() = p.getKey();
256  Particles.setSubset(p,0);
257  Particles.getProp<3>(p)[x] = 3.0;
258  Particles.getProp<3>(p)[y] = 3.0;
259  } else {
260 // boundary.add();
261 // boundary.last().get<0>() = p.getKey();
262  Particles.setSubset(p,1);
263  Particles.getProp<3>(p)[x] = xp[0]*xp[0]+xp[1]*xp[1];
264  Particles.getProp<3>(p)[y] = xp[0]*xp[0]-2*xp[0]*xp[1];
265  }
266  Particles.getProp<6>(p)[x] = xp[0]*xp[0]+xp[1]*xp[1];
267  Particles.getProp<6>(p)[y] = xp[0]*xp[0]-2*xp[0]*xp[1];
268  Particles.getProp<7>(p) = xp[0]+xp[1]-1.0;
269 
270  ++it2;
271  }
272 
273  vector_dist_subset_gpu<2, double, particle_type> Particles_bulk(Particles,0);
274  vector_dist_subset_gpu<2, double, particle_type> Particles_boundary(Particles,1);
275  auto & bulk = Particles_bulk.getIds();
276  auto & boundary = Particles_boundary.getIds();
277 
278  auto P = getV<0>(Particles);
279  auto V = getV<1>(Particles);
280  auto RHS = getV<2>(Particles);
281  auto dV = getV<3>(Particles);
282  auto div = getV<4>(Particles);
283  auto V_star = getV<5>(Particles);
284 
285 
286  auto P_bulk = getV<0>(Particles_bulk);
287  auto RHS_bulk =getV<2>(Particles_bulk);
288 
289  P_bulk = 0;
290 
291  Derivative_x_gpu Dx(Particles, 2, rCut, support_options::RADIUS);
292  Derivative_xx_gpu Dxx(Particles, 2, rCut, support_options::RADIUS);
293  Derivative_yy_gpu Dyy(Particles, 2, rCut, support_options::RADIUS);
294  Derivative_y_gpu Dy(Particles, 2, rCut, support_options::RADIUS);
295  Derivative_x_gpu Bulk_Dx(Particles_bulk, 2, rCut, support_options::RADIUS);
296  Derivative_y_gpu Bulk_Dy(Particles_bulk, 2, rCut, support_options::RADIUS);
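// Operators over the full particle set (Dx, Dy for the divergence; Dxx, Dyy for
// the Stokes operator) and over the bulk subset (Bulk_Dx, Bulk_Dy for the
// pressure gradient in the right-hand side).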
297 
298  int n = 0, nmax = 5, ctr = 0, errctr=1, Vreset = 0;
299  double V_err=1;
300  if (Vreset == 1) {
301  P_bulk = 0;
302  P = 0;
303  Vreset = 0;
304  }
305  P=0;
306  eq_id vx,vy;
307  vx.setId(0);
308  vy.setId(1);
309  double sum, sum1, sum_k,V_err_eps=1e-3,V_err_old;
310  auto Stokes1=Dxx(V[x])+Dyy(V[x]);
311  auto Stokes2=Dxx(V[y])+Dyy(V[y]);
312 
313  petsc_solver<double> solverPetsc;
314  //solverPetsc.setSolver(KSPGMRES);
315  //solverPetsc.setRestart(250);
316  //solverPetsc.setPreconditioner(PCJACOBI);
317  V_star=0;
318  RHS[x] = dV[x];
319  RHS[y] = dV[y];
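// Pressure-correction iteration: solve the two Stokes components for V with the
// current pressure gradient folded into the bulk right-hand side, impose the
// prescribed boundary values, update P from -div(V) on the bulk, and repeat
// until the relative l2 change of V falls below V_err_eps or nmax iterations
// are reached.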
320  while (V_err >= V_err_eps && n <= nmax) {
321  Particles.ghost_get<0>(SKIP_LABELLING);
322  RHS_bulk[x] = dV[x] + Bulk_Dx(P);
323  RHS_bulk[y] = dV[y] + Bulk_Dy(P);
324  DCPSE_scheme_gpu<equations2d2_gpu, decltype(Particles)> Solver(Particles);
325  Solver.impose(Stokes1, bulk, RHS[0], vx);
326  Solver.impose(Stokes2, bulk, RHS[1], vy);
327  Solver.impose(V[x], boundary, RHS[0], vx);
328  Solver.impose(V[y], boundary, RHS[1], vy);
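// Bulk rows discretise the momentum equations, boundary rows impose V = RHS as
// Dirichlet conditions; vx and vy identify which of the two coupled unknowns
// each block of equations refers to.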
329 
330  /*auto A=Solver.getA(options_solver::STANDARD);
331  //A.getMatrixTriplets().save("Tripletes");
332  A.write("Mat_lid");*/
333 
334  Solver.solve_with_solver(solverPetsc, V[x], V[y]);
335  Particles.ghost_get<1>(SKIP_LABELLING);
336  div = -(Dx(V[x]) + Dy(V[y]));
337  P_bulk = P + div;
338  sum = 0;
339  sum1 = 0;
340 
341  for (int j = 0; j < bulk.size(); j++) {
342  auto p = bulk.get<0>(j);
343  sum += (Particles.getProp<5>(p)[0] - Particles.getProp<1>(p)[0]) *
344  (Particles.getProp<5>(p)[0] - Particles.getProp<1>(p)[0]) +
345  (Particles.getProp<5>(p)[1] - Particles.getProp<1>(p)[1]) *
346  (Particles.getProp<5>(p)[1] - Particles.getProp<1>(p)[1]);
347  sum1 += Particles.getProp<1>(p)[0] * Particles.getProp<1>(p)[0] +
348  Particles.getProp<1>(p)[1] * Particles.getProp<1>(p)[1];
349  }
350 
351  sum = sqrt(sum);
352  sum1 = sqrt(sum1);
353  V_star = V;
354  v_cl.sum(sum);
355  v_cl.sum(sum1);
356  v_cl.execute();
357  V_err_old = V_err;
358  V_err = sum / sum1;
359  if (V_err > V_err_old || fabs(V_err_old - V_err) < 1e-8) {
360  errctr++;
361  //alpha_P -= 0.1;
362  } else {
363  errctr = 0;
364  }
365  if (n > 3) {
366  if (errctr > 3) {
367  std::cout << "CONVERGENCE LOOP BROKEN DUE TO INCREASE/VERY SLOW DECREASE IN ERROR" << std::endl;
368  Vreset = 1;
369  break;
370  } else {
371  Vreset = 0;
372  }
373  }
374  n++;
375  if (v_cl.rank() == 0) {
376  std::cout << "Rel l2 cgs err in V = " << V_err << " at " << n << std::endl;
377  }
378  }
379  double worst1 = 0.0;
380  double worst2 = 0.0;
381 
382  for(int j=0;j<bulk.size();j++)
383  { auto p=bulk.get<0>(j);
384  if (fabs(Particles.getProp<6>(p)[0] - Particles.getProp<1>(p)[0]) >= worst1) {
385  worst1 = fabs(Particles.getProp<6>(p)[0] - Particles.getProp<1>(p)[0]);
386  }
387  }
388  for(int j=0;j<bulk.size();j++)
389  { auto p=bulk.get<0>(j);
390  if (fabs(Particles.getProp<6>(p)[1] - Particles.getProp<1>(p)[1]) >= worst2) {
391  worst2 = fabs(Particles.getProp<6>(p)[1] - Particles.getProp<1>(p)[1]);
392  }
393  }
394  //Particles.deleteGhost();
395  //Particles.write("PC_subset_lid");
396  std::cout << "Maximum Analytic Error in Vx: " << worst1 << std::endl;
397  std::cout << "Maximum Analytic Error in Vy: " << worst2 << std::endl;
398  BOOST_REQUIRE(worst1 < 0.03);
399  BOOST_REQUIRE(worst2 < 0.03);
400 
401  }
402 
403 
404  BOOST_AUTO_TEST_CASE(dcpse_op_subset_PC_lid2) {
405 // int rank;
406 // MPI_Comm_rank(MPI_COMM_WORLD, &rank);
407  constexpr int x = 0;
408  constexpr int y = 1;
409  size_t edgeSemiSize = 20;
410  const size_t sz[2] = {2 * edgeSemiSize+1, 2 * edgeSemiSize+1};
411  Box<2, double> box({0, 0}, {1.0,1.0});
412  size_t bc[2] = {NON_PERIODIC, NON_PERIODIC};
413  double spacing[2];
414  spacing[0] = 1.0 / (sz[0] - 1);
415  spacing[1] = 1.0 / (sz[1] - 1);
416  double rCut = 3.9 * spacing[0];
417  int ord = 2;
418  double sampling_factor = 4.0;
419  Ghost<2, double> ghost(rCut);
420  BOOST_TEST_MESSAGE("Init vector_dist...");
421  auto &v_cl = create_vcluster();
422 
423  vector_dist_gpu<2, double, aggregate<double, VectorS<2, double>, VectorS<2, double>, VectorS<2, double>, double, VectorS<2, double>, VectorS<2, double>, double>> Particles(0, box,
424  bc,
425  ghost);
426  vector_dist_gpu<2, double, aggregate<double, VectorS<2, double>, VectorS<2, double>>> Particles_subset(Particles.getDecomposition(), 0);
427 
428 
429  //Init_DCPSE(Particles)
430  BOOST_TEST_MESSAGE("Init Particles...");
431 
431 
432  openfpm::vector<aggregate<int>> bulk;
433  openfpm::vector<aggregate<int>> boundary;
434 
435  auto it = Particles.getGridIterator(sz);
436  size_t pointId = 0;
437  double minNormOne = 999;
438  while (it.isNext())
439  {
440  Particles.add();
441  auto key = it.get();
442  mem_id k0 = key.get(0);
443  double xp0 = k0 * spacing[0];
444  Particles.getLastPos()[0] = xp0;
445  mem_id k1 = key.get(1);
446  double yp0 = k1 * spacing[1];
447  Particles.getLastPos()[1] = yp0;
448  ++it;
449  }
450  BOOST_TEST_MESSAGE("Sync Particles across processors...");
451  Particles.map();
452  Particles.ghost_get<0>();
453  auto it2 = Particles.getDomainIterator();
454  while (it2.isNext()) {
455  auto p = it2.get();
456  Point<2, double> xp = Particles.getPos(p);
457  if (xp[0] != 0 && xp[1] != 0 && xp[0] != 1.0 && xp[1] != 1.0) {
458  bulk.add();
459  bulk.last().get<0>() = p.getKey();
460  Particles.getProp<3>(p)[x] = 3.0;
461  Particles.getProp<3>(p)[y] = 3.0;
462  } else {
463  boundary.add();
464  boundary.last().get<0>() = p.getKey();
465  Particles.getProp<3>(p)[x] = xp[0]*xp[0]+xp[1]*xp[1];
466  Particles.getProp<3>(p)[y] = xp[0]*xp[0]-2*xp[0]*xp[1];
467  }
468  Particles.getProp<6>(p)[x] = xp[0]*xp[0]+xp[1]*xp[1];
469  Particles.getProp<6>(p)[y] = xp[0]*xp[0]-2*xp[0]*xp[1];
470  Particles.getProp<7>(p) = xp[0]+xp[1]-1.0;
471 
472  ++it2;
473  }
474 
475  for (int i = 0; i < bulk.size(); i++) {
476  Particles_subset.add();
477  Particles_subset.getLastPos()[0] = Particles.getPos(bulk.template get<0>(i))[0];
478  Particles_subset.getLastPos()[1] = Particles.getPos(bulk.template get<0>(i))[1];
479  }
480  Particles_subset.map();
481  Particles_subset.ghost_get<0>();
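// Particles_subset is a separate distributed vector holding copies of the bulk
// particle positions; pressure and its gradient are computed on this copy and
// transferred back to the full set by index, instead of using vector_dist_subset
// views as in the previous test.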
482 
483 
484 
485  auto P = getV<0>(Particles);
486  auto V = getV<1>(Particles);
487  auto RHS = getV<2>(Particles);
488  auto dV = getV<3>(Particles);
489  auto div = getV<4>(Particles);
490  auto V_star = getV<5>(Particles);
491 
492  auto P_bulk = getV<0>(Particles_subset);
493  auto Grad_bulk= getV<2>(Particles_subset);
494 
495  P_bulk = 0;
496 
497  Derivative_x Dx(Particles, 2, rCut, support_options::RADIUS);
498  Derivative_x Bulk_Dx(Particles_subset, 2, rCut, support_options::RADIUS);
499  Derivative_xx Dxx(Particles, 2, rCut, support_options::RADIUS);
500  Derivative_yy Dyy(Particles, 2, rCut, support_options::RADIUS);
501  Derivative_y Dy(Particles, 2, rCut, support_options::RADIUS), Bulk_Dy(Particles_subset, 2, rCut, support_options::RADIUS);
502 
503  int n = 0, nmax = 5, ctr = 0, errctr=0, Vreset = 0;
504  double V_err=1;
505  if (Vreset == 1) {
506  P_bulk = 0;
507  P = 0;
508  Vreset = 0;
509  }
510  P=0;
511  eq_id vx,vy;
512  vx.setId(0);
513  vy.setId(1);
514  double sum, sum1, sum_k,V_err_eps=1e-3,V_err_old;
515  auto Stokes1=Dxx(V[x])+Dyy(V[x]);
516  auto Stokes2=Dxx(V[y])+Dyy(V[y]);
517 
518  petsc_solver<double> solverPetsc;
519  //solverPetsc.setSolver(KSPGMRES);
520  //solverPetsc.setRestart(250);
521  //solverPetsc.setPreconditioner(PCJACOBI);
522  V_star=0;
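// Same pressure-correction iteration as in the previous test, except that the
// pressure gradient is evaluated on Particles_subset with Bulk_Dx/Bulk_Dy and
// added to the right-hand side of the full set through an explicit loop over
// the bulk indices.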
523  while (V_err >= V_err_eps && n <= nmax) {
524  RHS[x] = dV[x];
525  RHS[y] = dV[y];
526  Particles_subset.ghost_get<0>(SKIP_LABELLING);
527 
528  Grad_bulk[x] = Bulk_Dx(P_bulk);
529  Grad_bulk[y] = Bulk_Dy(P_bulk);
530  for (int i = 0; i < bulk.size(); i++) {
531  Particles.template getProp<2>(bulk.template get<0>(i))[x] += Particles_subset.getProp<2>(i)[x];
532  Particles.template getProp<2>(bulk.template get<0>(i))[y] += Particles_subset.getProp<2>(i)[y];
533  }
534 
535  DCPSE_scheme<equations2d2_gpu, decltype(Particles)> Solver(Particles);
536  Solver.impose(Stokes1, bulk, RHS[0], vx);
537  Solver.impose(Stokes2, bulk, RHS[1], vy);
538  Solver.impose(V[x], boundary, RHS[0], vx);
539  Solver.impose(V[y], boundary, RHS[1], vy);
540  Solver.solve_with_solver(solverPetsc, V[x], V[y]);
541  Particles.ghost_get<1>(SKIP_LABELLING);
542  div = -(Dx(V[x]) + Dy(V[y]));
543  P = P + div;
544  for (int i = 0; i < bulk.size(); i++) {
545  Particles_subset.getProp<0>(i) = Particles.template getProp<0>(bulk.template get<0>(i));
546  }
547  sum = 0;
548  sum1 = 0;
549 
550  for (int j = 0; j < bulk.size(); j++) {
551  auto p = bulk.get<0>(j);
552  sum += (Particles.getProp<5>(p)[0] - Particles.getProp<1>(p)[0]) *
553  (Particles.getProp<5>(p)[0] - Particles.getProp<1>(p)[0]) +
554  (Particles.getProp<5>(p)[1] - Particles.getProp<1>(p)[1]) *
555  (Particles.getProp<5>(p)[1] - Particles.getProp<1>(p)[1]);
556  sum1 += Particles.getProp<1>(p)[0] * Particles.getProp<1>(p)[0] +
557  Particles.getProp<1>(p)[1] * Particles.getProp<1>(p)[1];
558  }
559 
560  sum = sqrt(sum);
561  sum1 = sqrt(sum1);
562  V_star=V;
563  v_cl.sum(sum);
564  v_cl.sum(sum1);
565  v_cl.execute();
566  V_err_old = V_err;
567  V_err = sum / sum1;
568  if (V_err > V_err_old || fabs(V_err_old - V_err) < 1e-8) {
569  errctr++;
570  //alpha_P -= 0.1;
571  } else {
572  errctr = 0;
573  }
574  if (n > 3) {
575  if (errctr > 3) {
576  std::cout << "CONVERGENCE LOOP BROKEN DUE TO INCREASE/VERY SLOW DECREASE IN ERROR" << std::endl;
577  Vreset = 1;
578  break;
579  } else {
580  Vreset = 0;
581  }
582  }
583  n++;
584  if (v_cl.rank() == 0) {
585  std::cout << "Rel l2 cgs err in V = " << V_err << " at " << n << std::endl;
586 
587  }
588  }
589  double worst1 = 0.0;
590  double worst2 = 0.0;
591 
592  for(int j=0;j<bulk.size();j++)
593  { auto p=bulk.get<0>(j);
594  if (fabs(Particles.getProp<6>(p)[0] - Particles.getProp<1>(p)[0]) >= worst1) {
595  worst1 = fabs(Particles.getProp<6>(p)[0] - Particles.getProp<1>(p)[0]);
596  }
597  }
598  for(int j=0;j<bulk.size();j++)
599  { auto p=bulk.get<0>(j);
600  if (fabs(Particles.getProp<6>(p)[1] - Particles.getProp<1>(p)[1]) >= worst2) {
601  worst2 = fabs(Particles.getProp<6>(p)[1] - Particles.getProp<1>(p)[1]);
602  }
603  }
604 
605  std::cout << "Maximum Analytic Error in slice x: " << worst1 << std::endl;
606  std::cout << "Maximum Analytic Error in slice y: " << worst2 << std::endl;
607  BOOST_REQUIRE(worst1 < 0.03);
608  BOOST_REQUIRE(worst2 < 0.03);
609 
610  //Particles.write("PC_subset_lid2");
611  }
612 BOOST_AUTO_TEST_SUITE_END()
613 #endif
614 #endif
615 
616 #endif //if 0