proteus  1.8.1
SW2DCV.h
1 #ifndef SW2DCV_H
2 #define SW2DCV_H
3 #include "ArgumentsDict.h"
4 #include "CompKernel.h"
5 #include "ModelFactory.h"
6 #include "xtensor-python/pyarray.hpp"
7 #include <assert.h>
8 #include <cmath>
9 #include <iostream>
10 #include <valarray>
11 
12 namespace py = pybind11;
13 
14 #define POWER_SMOOTHNESS_INDICATOR 2
15 #define VEL_FIX_POWER 2.
16 #define REESTIMATE_MAX_EDGE_BASED_CFL 0
17 #define LIMITING_ITERATION 2
18 
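As they are used later in this file, POWER_SMOOTHNESS_INDICATOR, VEL_FIX_POWER and LIMITING_ITERATION appear to control, respectively, the exponent applied to the smoothness indicator, the exponent in the post-limiting velocity fix, and the number of flux-correction (FCT) limiting passes. A rough reading, based only on how they are referenced below:

   psi[i]          = pow(alpha_i, POWER_SMOOTHNESS_INDICATOR)        // smoothness indicator
   velocity fix    : hu *= 2*h^p / (h^p + max(h, hEps)^p),  p = VEL_FIX_POWER
   limiting passes : for (int it = 0; it < LIMITING_ITERATION; it++) { ... }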
19 /* inline functions */
20 namespace proteus {
21 // FOR CELL BASED ENTROPY VISCOSITY
22 inline double ENTROPY(const double &g, const double &h, const double &hu,
23  const double &hv, const double &z,
24  const double &one_over_hReg) {
25  return 0.5 *
26  (g * h * h + one_over_hReg * (hu * hu + hv * hv) + 2. * g * h * z);
27 }
28 inline double DENTROPY_DH(const double &g, const double &h, const double &hu,
29  const double &hv, const double &z,
30  const double &one_over_hReg) {
31  return g * h - 0.5 * (hu * hu + hv * hv) * std::pow(one_over_hReg, 2) + g * z;
32 }
33 inline double DENTROPY_DHU(const double &g, const double &h, const double &hu,
34  const double &hv, const double &z,
35  const double &one_over_hReg) {
36  return hu * one_over_hReg;
37 }
38 inline double DENTROPY_DHV(const double &g, const double &h, const double &hu,
39  const double &hv, const double &z,
40  const double &one_over_hReg) {
41  return hv * one_over_hReg;
42 }
43 inline double ENTROPY_FLUX1(const double &g, const double &h, const double &hu,
44  const double &hv, const double &z,
45  const double &one_over_hReg) {
46  return (ENTROPY(g, h, hu, hv, z, one_over_hReg) + 0.5 * g * h * h +
47  g * h * z) *
48  hu * one_over_hReg;
49 }
50 inline double ENTROPY_FLUX2(const double &g, const double &h, const double &hu,
51  const double &hv, const double &z,
52  const double &one_over_hReg) {
53  return (ENTROPY(g, h, hu, hv, z, one_over_hReg) + 0.5 * g * h * h +
54  g * h * z) *
55  hv * one_over_hReg;
56 }
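Read together, and writing the regularized one_over_hReg simply as 1/h, these helpers evaluate the usual shallow-water entropy (total energy) pair and its derivatives:

   eta(h, hu, hv)  = 1/2 * ( g*h^2 + (hu^2 + hv^2)/h + 2*g*h*z )
   d(eta)/dh       = g*h - (hu^2 + hv^2)/(2*h^2) + g*z
   d(eta)/dhu      = hu/h,          d(eta)/dhv = hv/h
   entropy flux    = ( eta + 1/2*g*h^2 + g*h*z ) * (hu/h, hv/h)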
57 // FOR ESTIMATING MAX WAVE SPEEDS
58 inline double f(const double &g, const double &h, const double &hZ) {
59  return ((h <= hZ) ? 2. * (sqrt(g * h) - sqrt(g * hZ))
60  : (h - hZ) * sqrt(0.5 * g * (h + hZ) / h / hZ));
61 }
62 inline double phi(const double &g, const double &h, const double &hL,
63  const double &hR, const double &uL, const double &uR) {
64  return (f(g, h, hL) + f(g, h, hR) + uR - uL);
65 }
66 inline double fp(const double &g, const double &h, const double &hZ) {
67  return ((h <= hZ)
68  ? sqrt(g / h)
69  : g * (2 * h * h + h * hZ + hZ * hZ) /
70  (2 * sqrt(2 * g) * h * h * hZ * sqrt(1 / h + 1 / hZ)));
71 }
72 inline double phip(const double &g, const double &h, const double &hL,
73  const double &hR) {
74  return (fp(g, h, hL) + fp(g, h, hR));
75 }
76 inline double nu1(const double &g, const double &hStar, const double &hL,
77  const double &uL, const double &one_over_hL) {
78  return (uL - sqrt(g * hL) *
79  sqrt((1. + fmax((hStar - hL) / 2. * one_over_hL, 0.0)) *
80  (1. + fmax((hStar - hL) * one_over_hL, 0.))));
81 }
82 inline double nu3(const double &g, const double &hStar, const double &hR,
83  const double &uR, const double &one_over_hR) {
84  return (uR + sqrt(g * hR) *
85  sqrt((1. + fmax((hStar - hR) / 2. * one_over_hR, 0.0)) *
86  (1. + fmax((hStar - hR) * one_over_hR, 0.))));
87 }
88 inline double phiDiff(const double &g, const double &h1k, const double &h2k,
89  const double &hL, const double &hR, const double &uL,
90  const double &uR) {
91  return ((phi(g, h2k, hL, hR, uL, uR) - phi(g, h1k, hL, hR, uL, uR)) /
92  (h2k - h1k));
93 }
94 inline double phiDDiff1(const double &g, const double &h1k, const double &h2k,
95  const double &hL, const double &hR, const double &uL,
96  const double &uR) {
97  return ((phiDiff(g, h1k, h2k, hL, hR, uL, uR) - phip(g, h1k, hL, hR)) /
98  (h2k - h1k));
99 }
100 inline double phiDDiff2(const double &g, const double &h1k, const double &h2k,
101  const double &hL, const double &hR, const double &uL,
102  const double &uR) {
103  return ((phip(g, h2k, hL, hR) - phiDiff(g, h1k, h2k, hL, hR, uL, uR)) /
104  (h2k - h1k));
105 }
106 inline double hStarLFromQuadPhiFromAbove(const double &g, const double &hStarL,
107  const double &hStarR, const double &hL,
108  const double &hR, const double &uL,
109  const double &uR) {
110  return (hStarL -
111  2 * phi(g, hStarL, hL, hR, uL, uR) /
112  (phip(g, hStarL, hL, hR) +
113  sqrt(std::pow(phip(g, hStarL, hL, hR), 2) -
114  4 * phi(g, hStarL, hL, hR, uL, uR) *
115  phiDDiff1(g, hStarL, hStarR, hL, hR, uL, uR))));
116 }
117 inline double hStarRFromQuadPhiFromBelow(const double &g, const double &hStarL,
118  const double &hStarR, const double &hL,
119  const double &hR, const double &uL,
120  const double &uR) {
121  return (hStarR -
122  2 * phi(g, hStarR, hL, hR, uL, uR) /
123  (phip(g, hStarR, hL, hR) +
124  sqrt(std::pow(phip(g, hStarR, hL, hR), 2) -
125  4 * phi(g, hStarR, hL, hR, uL, uR) *
126  phiDDiff2(g, hStarL, hStarR, hL, hR, uL, uR))));
127 }
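These helpers appear to implement the depth function used to locate the intermediate state h* of a local Riemann problem: f is the rarefaction/shock branch of the depth function, fp its derivative, phi the full function whose root is h*, and phiDiff/phiDDiff1/phiDDiff2 are divided differences feeding the two quasi-Newton updates above. Once h* is available, nu1 and nu3 bound the slowest and fastest waves; roughly:

   phi(h) = f(g, h, hL) + f(g, h, hR) + uR - uL                      // root h* of phi
   nu1 = uL - sqrt(g*hL) * sqrt( (1 + max((h*-hL)/(2*hL), 0)) * (1 + max((h*-hL)/hL, 0)) )
   nu3 = uR + sqrt(g*hR) * sqrt( (1 + max((h*-hR)/(2*hR), 0)) * (1 + max((h*-hR)/hR, 0)) )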
128 } // namespace proteus
129 
130 namespace proteus {
131 
132 class SW2DCV_base {
133 public:
134  virtual ~SW2DCV_base() {}
135  virtual void convexLimiting(arguments_dict &args) = 0;
136  virtual double calculateEdgeBasedCFL(arguments_dict &args) = 0;
137  virtual void calculateEV(arguments_dict &args) = 0;
138  virtual void calculateResidual(arguments_dict &args) = 0;
139  virtual void calculateMassMatrix(arguments_dict &args) = 0;
140  virtual void calculateLumpedMassMatrix(arguments_dict &args) = 0;
141 };
142 
143 template <class CompKernelType, int nSpace, int nQuadraturePoints_element,
144  int nDOF_mesh_trial_element, int nDOF_trial_element,
145  int nDOF_test_element, int nQuadraturePoints_elementBoundary>
146 class SW2DCV : public SW2DCV_base {
147 public:
148  const int nDOF_test_X_trial_element;
149  CompKernelType ck;
150  SW2DCV()
151  : nDOF_test_X_trial_element(nDOF_test_element * nDOF_trial_element),
152  ck() {
153  std::cout << "Constructing SW2DCV<CompKernelTemplate<" << nSpace << ","
154  << nQuadraturePoints_element << "," << nDOF_mesh_trial_element
155  << "," << nDOF_trial_element << "," << nDOF_test_element << ","
156  << nQuadraturePoints_elementBoundary << ">());" << std::endl
157  << std::flush;
158  }
159 
160  inline double maxWaveSpeedSharpInitialGuess(double g, double nx, double ny,
161  double hL, double huL, double hvL,
162  double hR, double huR, double hvR,
163  double hEps, bool debugging) {
164  double lambda1, lambda3;
165  // 1-eigenvalue: uL-sqrt(g*hL)
166  // 3-eigenvalue: uR+sqrt(g*hR)
167 
168  // To avoid division by 0
169  double one_over_hL = 2.0 * hL / (hL * hL + std::pow(fmax(hL, hEps), 2.0));
170  double one_over_hR = 2.0 * hR / (hR * hR + std::pow(fmax(hR, hEps), 2.0));
171 
172  double hVelL = nx * huL + ny * hvL;
173  double hVelR = nx * huR + ny * hvR;
174  double velL = one_over_hL * hVelL;
175  double velR = one_over_hR * hVelR;
176 
177  double x0 = std::pow(2. * sqrt(2.) - 1., 2.);
178  double hMin = fmin(hL, hR);
179  double hMax = fmax(hL, hR);
180 
181  double hStar;
182  double fMin = phi(g, x0 * hMin, hL, hR, velL, velR);
183  double fMax = phi(g, x0 * hMax, hL, hR, velL, velR);
184 
185  double sqrMin = sqrt(hMin);
186  double sqrMax = sqrt(hMax);
187 
188  if (0. <= fMin) {
189  hStar = fmin(
190  x0 * hMin,
191  std::pow(fmax(0., velL - velR + 2. * sqrt(g) * (sqrt(hL) + sqrt(hR))),
192  2) /
193  16. / g);
194  } else if (0. <= fMax) {
195  double a = 1.0 / (2.0 * sqrt(2.0));
196  double c = -hMin * a - sqrMin * sqrMax +
197  sqrMin * (velR - velL) / (2.0 * sqrt(g));
198  double delta = hMin - 4.0 * a * c;
199  if (delta < 0.0) {
200  std::cout << "Bug in computing lambda. Exiting." << std::endl;
201  abort();
202  }
203  hStar = fmin(x0 * hMax, std::pow((-sqrMin + sqrt(delta)) / (2. * a), 2));
204  } else {
205  hStar = sqrMin * sqrMax *
206  (1.0 + sqrt(2.0 / g) * (velL - velR) / (sqrMin + sqrMax));
207  }
208 
209  // return lambda_max
210  lambda1 = nu1(g, hStar, hL, velL, one_over_hL);
211  lambda3 = nu3(g, hStar, hR, velR, one_over_hR);
212  return fmax(fabs(lambda1), fabs(lambda3));
213  }
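The three branches above appear to select a sharp initial guess for h* according to the sign of phi at x0*hMin and x0*hMax: a two-rarefaction-type closed form when phi(x0*hMin) >= 0, a quadratic-based estimate when only phi(x0*hMax) >= 0, and a two-shock-type estimate otherwise; the returned value is then max(|nu1(h*)|, |nu3(h*)|). As used further below (see the edge-based CFL computation), this feeds the graph-viscosity coefficients:

   dLow[ij] = max( maxWaveSpeedSharpInitialGuess(g, n_ij, U_i, U_j, ...) * |c_ij| ,
                   maxWaveSpeedSharpInitialGuess(g, n_ji, U_j, U_i, ...) * |c_ji| )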
214 
215  inline void calculateCFL(const double &elementDiameter, const double &g,
216  const double &h, const double &hu, const double &hv,
217  const double hEps, double &cfl) {
218  double cflx, cfly, c = sqrt(fmax(g * hEps, g * h));
219  double u = 2 * h / (h * h + std::pow(fmax(h, hEps), 2)) * hu;
220  double v = 2 * h / (h * h + std::pow(fmax(h, hEps), 2)) * hv;
221 
222  if (u > 0.0)
223  cflx = (u + c) / elementDiameter;
224  else
225  cflx = fabs(u - c) / elementDiameter;
226 
227  if (v > 0.0)
228  cfly = (v + c) / elementDiameter;
229  else
230  cfly = fabs(v - c) / elementDiameter;
231  cfl = sqrt(cflx * cflx + cfly * cfly); // hack, conservative estimate
232  }
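Velocities are recovered throughout this header with the regularized inverse depth 2*h / (h*h + max(h, hEps)^2), which equals 1/h whenever h >= hEps and tends to zero smoothly as h -> 0, so u and v stay bounded in nearly dry cells. A minimal self-contained sketch (the function name is illustrative, not part of the header):

#include <algorithm>
#include <cmath>

// Regularized 1/h used throughout SW2DCV.h: exact for h >= hEps, -> 0 as h -> 0.
inline double one_over_h_reg(double h, double hEps) {
  return 2.0 * h / (h * h + std::pow(std::max(h, hEps), 2));
}
// usage: double u = hu * one_over_h_reg(h, hEps);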
233 
234  void convexLimiting(arguments_dict &args) {
235  double dt = args.scalar<double>("dt");
236  int NNZ = args.scalar<int>("NNZ");
237  int numDOFs = args.scalar<int>("numDOFs");
238  xt::pyarray<double> &lumped_mass_matrix =
239  args.array<double>("lumped_mass_matrix");
240  xt::pyarray<double> &h_old = args.array<double>("h_old");
241  xt::pyarray<double> &hu_old = args.array<double>("hu_old");
242  xt::pyarray<double> &hv_old = args.array<double>("hv_old");
243  xt::pyarray<double> &b_dof = args.array<double>("b_dof");
244  xt::pyarray<double> &high_order_hnp1 =
245  args.array<double>("high_order_hnp1");
246  xt::pyarray<double> &high_order_hunp1 =
247  args.array<double>("high_order_hunp1");
248  xt::pyarray<double> &high_order_hvnp1 =
249  args.array<double>("high_order_hvnp1");
250  xt::pyarray<double> &extendedSourceTerm_hu =
251  args.array<double>("extendedSourceTerm_hu");
252  xt::pyarray<double> &extendedSourceTerm_hv =
253  args.array<double>("extendedSourceTerm_hv");
254  xt::pyarray<double> &limited_hnp1 = args.array<double>("limited_hnp1");
255  xt::pyarray<double> &limited_hunp1 = args.array<double>("limited_hunp1");
256  xt::pyarray<double> &limited_hvnp1 = args.array<double>("limited_hvnp1");
257  xt::pyarray<int> &csrRowIndeces_DofLoops =
258  args.array<int>("csrRowIndeces_DofLoops");
259  xt::pyarray<int> &csrColumnOffsets_DofLoops =
260  args.array<int>("csrColumnOffsets_DofLoops");
261  xt::pyarray<double> &MassMatrix = args.array<double>("MassMatrix");
262  xt::pyarray<double> &dH_minus_dL = args.array<double>("dH_minus_dL");
263  xt::pyarray<double> &muH_minus_muL = args.array<double>("muH_minus_muL");
264  double hEps = args.scalar<double>("hEps");
265  int LUMPED_MASS_MATRIX = args.scalar<int>("LUMPED_MASS_MATRIX");
266  xt::pyarray<double> &dLow = args.array<double>("dLow");
267  xt::pyarray<double> &new_SourceTerm_hu =
268  args.array<double>("new_SourceTerm_hu");
269  xt::pyarray<double> &new_SourceTerm_hv =
270  args.array<double>("new_SourceTerm_hv");
271  xt::pyarray<double> &hLow = args.array<double>("hLow");
272  xt::pyarray<double> &huLow = args.array<double>("huLow");
273  xt::pyarray<double> &hvLow = args.array<double>("hvLow");
274  xt::pyarray<double> &h_min = args.array<double>("h_min");
275  xt::pyarray<double> &h_max = args.array<double>("h_max");
276  xt::pyarray<double> &kin_max = args.array<double>("kin_max");
277  double KE_tiny = args.scalar<double>("KE_tiny");
278 
279  // Declare stuff for limiting on h and h*heta
280  std::valarray<double> Rneg(0.0, numDOFs), Rpos(0.0, numDOFs);
281 
282  // Create FCT component matrices in vector form
283  std::valarray<double> FCT_h(0.0, dH_minus_dL.size()),
284  FCT_hu(0.0, dH_minus_dL.size()), FCT_hv(0.0, dH_minus_dL.size());
285 
287  // Loop to define FCT matrices for each component //
289  int ij = 0;
290  for (int i = 0; i < numDOFs; i++) {
291  // Read some vectors
292  double high_order_hnp1i = high_order_hnp1[i];
293  double high_order_hunp1i = high_order_hunp1[i];
294  double high_order_hvnp1i = high_order_hvnp1[i];
295  double hi = h_old[i];
296  double huni = hu_old[i];
297  double hvni = hv_old[i];
298  double Zi = b_dof[i];
299  double mi = lumped_mass_matrix[i];
300  double one_over_hiReg =
301  2 * hi / (hi * hi + std::pow(fmax(hi, hEps), 2)); // hEps
302 
303  // LOOP OVER THE SPARSITY PATTERN (j-LOOP)//
304  for (int offset = csrRowIndeces_DofLoops[i];
305  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
306 
307  int j = csrColumnOffsets_DofLoops[offset];
308 
309  if (i != j) {
310  // Read some vectors
311  double hj = h_old[j];
312  double hunj = hu_old[j];
313  double hvnj = hv_old[j];
314  double Zj = b_dof[j];
315  double one_over_hjReg =
316  2. * hj / (hj * hj + std::pow(fmax(hj, hEps), 2));
317 
318  // Compute star states
319  double hStarij = fmax(0., hi + Zi - fmax(Zi, Zj));
320  double huStarij = huni * hStarij * one_over_hiReg;
321  double hvStarij = hvni * hStarij * one_over_hiReg;
322 
323  double hStarji = fmax(0., hj + Zj - fmax(Zi, Zj));
324  double huStarji = hunj * hStarji * one_over_hjReg;
325  double hvStarji = hvnj * hStarji * one_over_hjReg;
326 
327  // i-th row of flux correction matrix
328  double ML_minus_MC = (LUMPED_MASS_MATRIX == 1
329  ? 0.
330  : (i == j ? 1. : 0.) * mi - MassMatrix[ij]);
331 
332  FCT_h[ij] =
333  ML_minus_MC *
334  (high_order_hnp1[j] - hj - (high_order_hnp1i - hi)) +
335  dt * (dH_minus_dL[ij] - muH_minus_muL[ij]) * (hStarji - hStarij) +
336  dt * muH_minus_muL[ij] * (hj - hi);
337 
338  FCT_hu[ij] = ML_minus_MC * (high_order_hunp1[j] - hunj -
339  (high_order_hunp1i - huni)) +
340  dt * (dH_minus_dL[ij] - muH_minus_muL[ij]) *
341  (huStarji - huStarij) +
342  dt * muH_minus_muL[ij] * (hunj - huni);
343 
344  FCT_hv[ij] = ML_minus_MC * (high_order_hvnp1[j] - hvnj -
345  (high_order_hvnp1i - hvni)) +
346  dt * (dH_minus_dL[ij] - muH_minus_muL[ij]) *
347  (hvStarji - hvStarij) +
348  dt * muH_minus_muL[ij] * (hvnj - hvni);
349 
350  } else {
351  FCT_h[ij] = 0.0;
352  FCT_hu[ij] = 0.0;
353  FCT_hv[ij] = 0.0;
354  }
355 
356  // UPDATE ij //
357  ij += 1;
358  } // j loop ends here
359  } // i loop ends here
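In matrix form, writing U for any of h, hu, hv, U* for the corresponding star states, and M_L, M_C for the lumped and consistent mass matrices, each entry assembled in this loop is roughly the FCT antidiffusive flux

   A_ij^U = (M_L - M_C)_ij * [ (U_j^{H,n+1} - U_j^n) - (U_i^{H,n+1} - U_i^n) ]
          + dt * ( (dH - dL)_ij - (muH - muL)_ij ) * (U*_ji - U*_ij)
          + dt * (muH - muL)_ij * (U_j^n - U_i^n),           with A_ii^U = 0.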
360 
362  // Main loop to define limiters and compute the limited solution //////
364 
365  // Create Lij_array and initialize with 1
366  std::valarray<double> Lij_array(1.0, dH_minus_dL.size());
367 
368  /* Loop over limiting iterations */
369  for (int limit_iter = 0; limit_iter < LIMITING_ITERATION; limit_iter++) {
370 
371  /* Loop to define FCT Rpos and Rneg values */
372  ij = 0;
373  for (int i = 0; i < numDOFs; i++) {
374 
375  double hi = h_old[i];
376  double mi = lumped_mass_matrix[i];
377  // Initialize Pneg and Ppos quantities at ith node
378  double Pnegi = 0., Pposi = 0.;
379 
380  // LOOP OVER THE SPARSITY PATTERN (j-LOOP)
381  for (int offset = csrRowIndeces_DofLoops[i];
382  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
383 
384  int j = csrColumnOffsets_DofLoops[offset];
385 
386  // COMPUTE P VECTORS
387  Pnegi += FCT_h[ij] * ((FCT_h[ij] < 0) ? 1. : 0.);
388  Pposi += FCT_h[ij] * ((FCT_h[ij] > 0) ? 1. : 0.);
389 
390  // UPDATE ij
391  ij += 1;
392  } // j loop ends here
393 
394  double psmall_h = 1E-14 * fmax(fabs(Pnegi), fabs(Pposi));
395 
397  // COMPUTE Q VECTORS //
399  double Qnegi = std::min(mi * (h_min[i] - hLow[i]), 0.0);
400  double Qposi = std::max(mi * (h_max[i] - hLow[i]), 0.0);
401 
403  // COMPUTE R VECTORS //
405  if (hi <= hEps) {
406  Rneg[i] = 0.;
407  Rpos[i] = 0.;
408  } else {
409  // for h
410  if (Pnegi >= -psmall_h) {
411  Rneg[i] = 1.0;
412  } else {
413  Rneg[i] = std::min(1.0, Qnegi / Pnegi);
414  }
415  if (Pposi <= psmall_h) {
416  Rpos[i] = 1.0;
417  } else {
418  Rpos[i] = std::min(1.0, Qposi / Pposi);
419  }
420  }
421  } // i loop ends here
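This is essentially Zalesak's construction: P_i collects the candidate antidiffusive contributions of each sign, Q_i measures how far the low-order value may move before hitting the local bounds, and R_i is the admissible fraction,

   Pneg_i = sum_j min(A_ij^h, 0),  Qneg_i = min(m_i*(h_min_i - hLow_i), 0),  Rneg_i = min(1, Qneg_i/Pneg_i)
   Ppos_i = sum_j max(A_ij^h, 0),  Qpos_i = max(m_i*(h_max_i - hLow_i), 0),  Rpos_i = min(1, Qpos_i/Ppos_i)

with R_i = 0 at (nearly) dry nodes and R_i = 1 when the corresponding P_i is negligible.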
422 
423  /* Here we compute the limiters */
424  ij = 0;
425  for (int i = 0; i < numDOFs; i++) {
426 
427  double mi = lumped_mass_matrix[i];
428  double ci =
429  kin_max[i] * hLow[i] -
430  0.5 * (huLow[i] * huLow[i] + hvLow[i] * hvLow[i]); // for KE lim.
431 
432  // LOOP OVER THE SPARSITY PATTERN (j-LOOP)//
433  for (int offset = csrRowIndeces_DofLoops[i];
434  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
435 
436  int j = csrColumnOffsets_DofLoops[offset];
437 
438  if (j != i) {
439 
440  // Compute limiter based on water height
441  if (FCT_h[ij] >= 0.) {
442  Lij_array[ij] = fmin(Lij_array[ij], std::min(Rneg[j], Rpos[i]));
443  } else {
444  Lij_array[ij] = fmin(Lij_array[ij], std::min(Rneg[i], Rpos[j]));
445  }
446 
447  /*======================================================*/
448  /* Kinetic Energy limiting */
449  double lambdaj =
450  csrRowIndeces_DofLoops[i + 1] - csrRowIndeces_DofLoops[i] - 1;
451  double Ph_ij = FCT_h[ij] / mi / lambdaj;
452  double Phu_ij = FCT_hu[ij] / mi / lambdaj;
453  double Phv_ij = FCT_hv[ij] / mi / lambdaj;
454 
455  // Here we initialize limiter based on kinetic energy
456  double KE_limiter = 1;
457  double neg_root_i = 1., neg_root_j = 1.;
458 
459  // We first check if local kinetic energy > 0
460  if (kin_max[i] * hLow[i] <= KE_tiny)
461  KE_limiter = 0.;
462 
463  // We then check if KE bound is already satisfied
464  double hi_with_lijPij = hLow[i] + Lij_array[ij] * Ph_ij;
465  double hui_with_lijPij = huLow[i] + Lij_array[ij] * Phu_ij;
466  double hvi_with_lijPij = hvLow[i] + Lij_array[ij] * Phv_ij;
467  double psi = kin_max[i] * hi_with_lijPij -
468  0.5 * (hui_with_lijPij * hui_with_lijPij +
469  hvi_with_lijPij * hvi_with_lijPij);
470  if (psi > -KE_tiny) {
471  KE_limiter = fmin(KE_limiter, Lij_array[ij]);
472  }
473 
474  /*======================================================*/
475 
476  double ai = -0.5 * (Phu_ij * Phu_ij + Phv_ij * Phv_ij);
477  double bi =
478  kin_max[i] * Ph_ij - (huLow[i] * Phu_ij + hvLow[i] * Phv_ij);
479  double delta_i = bi * bi - 4. * ai * ci;
480 
481  if (delta_i < 0. || ai >= -0.) {
482  KE_limiter = fmin(KE_limiter, Lij_array[ij]);
483  } else {
484  neg_root_i = (-bi - std::sqrt(delta_i)) / 2. / ai;
485  }
486 
487  // root of jth-DOF (To compute transpose component)
488  double lambdai =
489  csrRowIndeces_DofLoops[j + 1] - csrRowIndeces_DofLoops[j] - 1;
490  double mj = lumped_mass_matrix[j];
491  double cj = kin_max[j] * hLow[j] -
492  0.5 * (huLow[j] * huLow[j] + hvLow[j] * hvLow[j]);
493  double Ph_ji = -FCT_h[ij] / mj / lambdai; // Aij=-Aji
494  double Phu_ji = -FCT_hu[ij] / mj / lambdai;
495  double Phv_ji = -FCT_hv[ij] / mj / lambdai;
496  double aj = -0.5 * (Phu_ji * Phu_ji + Phv_ji * Phv_ji);
497  double bj =
498  kin_max[j] * Ph_ji - (huLow[j] * Phu_ji + hvLow[j] * Phv_ji);
499  double delta_j = bj * bj - 4. * aj * cj;
500 
501  if (delta_j < 0. || aj >= -0.) {
502  KE_limiter = fmin(KE_limiter, Lij_array[ij]);
503  } else {
504  neg_root_j = (-bj - std::sqrt(delta_j)) / 2. / aj;
505  }
506 
507  // define final limiter based on KE
508  KE_limiter =
509  fmin(KE_limiter, fmin(fabs(neg_root_i), fabs(neg_root_j)));
510 
511  // Here we set final limiter
512  Lij_array[ij] = fmin(KE_limiter, Lij_array[ij]);
513 
514  } else {
515  // if i = j then lij = 0
516  Lij_array[ij] = 0.;
517  }
518 
519  // update ij
520  ij += 1;
521  } // end j loop
522  } // end i loop
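The kinetic-energy limiting above appears to enforce psi(l) >= 0 along the limited update, where psi(l) = kin_max_i*(hLow_i + l*Ph_ij) - 1/2*|qLow_i + l*Pq_ij|^2 is a concave quadratic in the limiter value l:

   psi(l) = c_i + b_i*l + a_i*l^2,     a_i = -1/2*(Phu_ij^2 + Phv_ij^2) <= 0,
   b_i = kin_max_i*Ph_ij - (huLow_i*Phu_ij + hvLow_i*Phv_ij),
   c_i = kin_max_i*hLow_i - 1/2*(huLow_i^2 + hvLow_i^2),

so the admissible l is capped by the smaller root (-b - sqrt(b^2 - 4*a*c)) / (2*a) when it exists; the same cap is computed for the transposed contribution at node j, and the minimum of all caps is applied.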
523 
524  /* Final loop to apply limiting and then define limited solution */
525  ij = 0;
526  for (int i = 0; i < numDOFs; i++) {
527 
528  double one_over_mi = 1.0 / lumped_mass_matrix[i];
529  double ith_Limiter_times_FluxCorrectionMatrix1 = 0.;
530  double ith_Limiter_times_FluxCorrectionMatrix2 = 0.;
531  double ith_Limiter_times_FluxCorrectionMatrix3 = 0.;
532 
533  // LOOP OVER THE SPARSITY PATTERN (j-LOOP)//
534  for (int offset = csrRowIndeces_DofLoops[i];
535  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
536 
537  int j = csrColumnOffsets_DofLoops[offset];
538 
539  // COMPUTE LIMITED FLUX //
540  ith_Limiter_times_FluxCorrectionMatrix1 += Lij_array[ij] * FCT_h[ij];
541  ith_Limiter_times_FluxCorrectionMatrix2 += Lij_array[ij] * FCT_hu[ij];
542  ith_Limiter_times_FluxCorrectionMatrix3 += Lij_array[ij] * FCT_hv[ij];
543 
544  // update ij
545  ij += 1;
546  } // end j loop
547 
548  // then we add lij*Aij to uLow
549  hLow[i] += one_over_mi * ith_Limiter_times_FluxCorrectionMatrix1;
550  huLow[i] += one_over_mi * ith_Limiter_times_FluxCorrectionMatrix2;
551  hvLow[i] += one_over_mi * ith_Limiter_times_FluxCorrectionMatrix3;
552 
553  // Finally define the limited solution
554  limited_hnp1[i] = hLow[i];
555  limited_hunp1[i] = huLow[i] + dt * one_over_mi * new_SourceTerm_hu[i];
556  limited_hvnp1[i] = hvLow[i] + dt * one_over_mi * new_SourceTerm_hv[i];
557 
558  if (limited_hnp1[i] < -hEps && dt < 1.0) {
559  std::cout << "Limited water height is negative: \n "
560  << "hLow: " << hLow[i] << "\n"
561  << "hHigh: " << limited_hnp1[i] << "\n"
562  << "hEps: " << hEps << "\n"
563  << " ... aborting!" << std::endl;
564  abort();
565  } else {
566  // clean up uHigh from round off error
567  if (limited_hnp1[i] < hEps)
568  limited_hnp1[i] = 0.0;
569  double aux = fmax(limited_hnp1[i], hEps);
570  limited_hunp1[i] *= 2 * std::pow(limited_hnp1[i], VEL_FIX_POWER) /
571  (std::pow(limited_hnp1[i], VEL_FIX_POWER) +
572  std::pow(aux, VEL_FIX_POWER));
573  limited_hvnp1[i] *= 2 * std::pow(limited_hnp1[i], VEL_FIX_POWER) /
574  (std::pow(limited_hnp1[i], VEL_FIX_POWER) +
575  std::pow(aux, VEL_FIX_POWER));
576  }
577  } // end i loop
578 
579  // update FCT matrices as Fct = (1 - Lij)*Fct
580  FCT_h = (1.0 - Lij_array) * FCT_h;
581  FCT_hu = (1.0 - Lij_array) * FCT_hu;
582  FCT_hv = (1.0 - Lij_array) * FCT_hv;
583  } // end loop for limiting iteration
584  } // end convex limiting function
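Because the correction matrices are rescaled by (1 - l_ij) at the end of each pass, each subsequent pass redistributes only the antidiffusive flux rejected by the previous one; with LIMITING_ITERATION = 2 this amounts to one main limiting sweep plus one clean-up sweep. In terms of the argument dictionary read at the top of the method:

   // consumed: hLow, huLow, hvLow, h_min, h_max, kin_max, dH_minus_dL, muH_minus_muL, MassMatrix, dLow
   // produced: limited_hnp1, limited_hunp1, limited_hvnp1  (low-order state + limited correction + sources)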
585 
586  double calculateEdgeBasedCFL(arguments_dict &args) {
587  double g = args.scalar<double>("g");
588  int numDOFsPerEqn = args.scalar<int>("numDOFsPerEqn");
589  xt::pyarray<double> &lumped_mass_matrix =
590  args.array<double>("lumped_mass_matrix");
591  xt::pyarray<double> &h_dof_old = args.array<double>("h_dof_old");
592  xt::pyarray<double> &hu_dof_old = args.array<double>("hu_dof_old");
593  xt::pyarray<double> &hv_dof_old = args.array<double>("hv_dof_old");
594  xt::pyarray<double> &b_dof = args.array<double>("b_dof");
595  xt::pyarray<int> &csrRowIndeces_DofLoops =
596  args.array<int>("csrRowIndeces_DofLoops");
597  xt::pyarray<int> &csrColumnOffsets_DofLoops =
598  args.array<int>("csrColumnOffsets_DofLoops");
599  double hEps = args.scalar<double>("hEps");
600  xt::pyarray<double> &Cx = args.array<double>("Cx");
601  xt::pyarray<double> &Cy = args.array<double>("Cy");
602  xt::pyarray<double> &CTx = args.array<double>("CTx");
603  xt::pyarray<double> &CTy = args.array<double>("CTy");
604  xt::pyarray<double> &dLow = args.array<double>("dLow");
605  double run_cfl = args.scalar<double>("run_cfl");
606  xt::pyarray<double> &edge_based_cfl = args.array<double>("edge_based_cfl");
607  int debug = args.scalar<int>("debug");
608 
609  double max_edge_based_cfl = 0.;
610 
611  int ij = 0;
612  for (int i = 0; i < numDOFsPerEqn; i++) {
613  // solution at time tn for the ith DOF
614  double hi = h_dof_old[i];
615  double hui = hu_dof_old[i];
616  double hvi = hv_dof_old[i];
617  double dLowii = 0.;
618 
619  for (int offset = csrRowIndeces_DofLoops[i];
620  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
621 
622  // loop in j (sparsity pattern)
623  int j = csrColumnOffsets_DofLoops[offset];
624 
625  // solution at time tn for the jth DOF
626  double hj = h_dof_old[j];
627  double huj = hu_dof_old[j];
628  double hvj = hv_dof_old[j];
629 
630  if (i != j) {
632  // DISSIPATIVE MATRIX //
634  double cij_norm = sqrt(Cx[ij] * Cx[ij] + Cy[ij] * Cy[ij]);
635  double cji_norm = sqrt(CTx[ij] * CTx[ij] + CTy[ij] * CTy[ij]);
636  double nxij = Cx[ij] / cij_norm, nyij = Cy[ij] / cij_norm;
637  double nxji = CTx[ij] / cji_norm, nyji = CTy[ij] / cji_norm;
638  dLow[ij] =
639  fmax(maxWaveSpeedSharpInitialGuess(g, nxij, nyij, hi, hui, hvi,
640  hj, huj, hvj, hEps, debug) *
641  cij_norm, // hEps
642  maxWaveSpeedSharpInitialGuess(g, nxji, nyji, hj, huj, hvj,
643  hi, hui, hvi, hEps, debug) *
644  cji_norm); // hEps
645  dLowii -= dLow[ij];
646  } else
647  dLow[ij] = 0.;
648  // update ij
649  ij += 1;
650  }
652  // CALCULATE EDGE BASED CFL //
654  double mi = lumped_mass_matrix[i];
655  edge_based_cfl[i] = 1.0 * fabs(dLowii) / mi;
656  max_edge_based_cfl = fmax(max_edge_based_cfl, edge_based_cfl[i]);
657  }
658  return max_edge_based_cfl;
659  } // End calculateEdgeBasedCFL
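Since edge_based_cfl[i] = |dL_ii| / m_i, the returned maximum is, up to the scheme's exact stability constant, the reciprocal of the largest time step for which the low-order update stays a convex combination at every node. A hedged sketch of how a caller might turn it into a time step, assuming the target Courant number is the run_cfl argument read above (the helper name is mine, not part of the header):

#include <algorithm>

// dt such that dt * max_i edge_based_cfl[i] <= run_cfl (sketch only; the
// actual driver may use a different constant, and the floor avoids division by zero).
inline double choose_dt(double run_cfl, double max_edge_based_cfl) {
  return run_cfl / std::max(max_edge_based_cfl, 1e-15);
}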
660 
661  void calculateEV(arguments_dict &args) {
662  double g = args.scalar<double>("g");
663  xt::pyarray<double> &h_dof_old = args.array<double>("h_dof_old");
664  xt::pyarray<double> &hu_dof_old = args.array<double>("hu_dof_old");
665  xt::pyarray<double> &hv_dof_old = args.array<double>("hv_dof_old");
666  xt::pyarray<double> &b_dof = args.array<double>("b_dof");
667  xt::pyarray<double> &Cx = args.array<double>("Cx");
668  xt::pyarray<double> &Cy = args.array<double>("Cy");
669  xt::pyarray<double> &CTx = args.array<double>("CTx");
670  xt::pyarray<double> &CTy = args.array<double>("CTy");
671  int numDOFsPerEqn = args.scalar<int>("numDOFsPerEqn");
672  xt::pyarray<int> &csrRowIndeces_DofLoops =
673  args.array<int>("csrRowIndeces_DofLoops");
674  xt::pyarray<int> &csrColumnOffsets_DofLoops =
675  args.array<int>("csrColumnOffsets_DofLoops");
676  xt::pyarray<double> &lumped_mass_matrix =
677  args.array<double>("lumped_mass_matrix");
678  double eps = args.scalar<double>("eps");
679  double hEps = args.scalar<double>("hEps");
680  xt::pyarray<double> &global_entropy_residual =
681  args.array<double>("global_entropy_residual");
682  double &dij_small = args.scalar<double>("dij_small");
683 
685  // ********** FIRST LOOP ON DOFs ********** //
687 
688  // To compute:
689  // * Entropy at i-th node
690  std::valarray<double> eta(numDOFsPerEqn);
691  for (int i = 0; i < numDOFsPerEqn; i++) {
692  // COMPUTE ENTROPY. NOTE: WE CONSIDER A FLAT BOTTOM
693  double hi = h_dof_old[i];
694  double one_over_hiReg =
695  2 * hi / (hi * hi + std::pow(fmax(hi, hEps), 2)); // hEps
696  eta[i] = ENTROPY(g, hi, hu_dof_old[i], hv_dof_old[i], 0., one_over_hiReg);
697  }
698 
699  // ********** END OF FIRST LOOP ON DOFs ********** //
700 
702  // ********** SECOND LOOP ON DOFs ********** //
704  // To compute:
705  // * global entropy residual
706  // * dij_small to avoid division by 0
707 
708  int ij = 0;
709  std::valarray<double> etaMax(numDOFsPerEqn), etaMin(numDOFsPerEqn);
710 
711  // speed = sqrt(g * max(h_0)); since hEps = eps * max(h_0), hEps / eps recovers max(h_0)
712  double speed = std::sqrt(g * hEps / eps);
713  dij_small = 0.0;
714 
715  for (int i = 0; i < numDOFsPerEqn; i++) {
716 
717  // solution at time tn for the ith DOF
718  double hi = h_dof_old[i];
719  double hui = hu_dof_old[i];
720  double hvi = hv_dof_old[i];
721  double Zi = b_dof[i];
722 
723  // Define some things using above
724  double one_over_hiReg =
725  2 * hi / (hi * hi + std::pow(fmax(hi, hEps), 2)); // hEps
726  double ui = hui * one_over_hiReg;
727  double vi = hvi * one_over_hiReg;
728  double mi = lumped_mass_matrix[i];
729 
730  // initialize etaMax and etaMin
731  etaMax[i] = fabs(eta[i]);
732  etaMin[i] = fabs(eta[i]);
733 
734  // FOR ENTROPY RESIDUAL, NOTE: FLAT BOTTOM //
735  double ith_flux_term1 = 0., ith_flux_term2 = 0., ith_flux_term3 = 0.;
736  double entropy_flux = 0.;
737  double sum_entprime_flux = 0.;
738  double eta_prime1 = DENTROPY_DH(g, hi, hui, hvi, 0., one_over_hiReg);
739  double eta_prime2 = DENTROPY_DHU(g, hi, hui, hvi, 0., one_over_hiReg);
740  double eta_prime3 = DENTROPY_DHV(g, hi, hui, hvi, 0., one_over_hiReg);
741 
742  // loop in j (sparsity pattern)
743  for (int offset = csrRowIndeces_DofLoops[i];
744  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
745 
746  int j = csrColumnOffsets_DofLoops[offset];
747 
748  // solution at time tn for the jth DOF
749  double hj = h_dof_old[j];
750  double huj = hu_dof_old[j];
751  double hvj = hv_dof_old[j];
752  double Zj = b_dof[j];
753 
754  // Then define some things here using above
755  double one_over_hjReg =
756  2.0 * hj / (hj * hj + std::pow(fmax(hj, hEps), 2));
757  double uj = huj * one_over_hjReg;
758  double vj = hvj * one_over_hjReg;
759 
760  // auxiliary functions to compute fluxes
761  double aux_h =
762  (uj * hj - ui * hi) * Cx[ij] + (vj * hj - vi * hi) * Cy[ij];
763  double aux_hu =
764  (uj * huj - ui * hui) * Cx[ij] + (vj * huj - vi * hui) * Cy[ij];
765  double aux_hv =
766  (uj * hvj - ui * hvi) * Cx[ij] + (vj * hvj - vi * hvi) * Cy[ij];
767 
768  // flux for entropy
769  ith_flux_term1 += aux_h;
770  ith_flux_term2 += aux_hu + 0.5 * g * hj * hj * Cx[ij];
771  ith_flux_term3 += aux_hv + 0.5 * g * hj * hj * Cy[ij];
772 
773  // NOTE: WE CONSIDER FLAT BOTTOM
774  entropy_flux +=
775  (Cx[ij] * ENTROPY_FLUX1(g, hj, huj, hvj, 0., one_over_hjReg) +
776  Cy[ij] * ENTROPY_FLUX2(g, hj, huj, hvj, 0., one_over_hjReg));
777 
778  // COMPUTE ETA MIN AND ETA MAX //
779  etaMax[i] = fmax(etaMax[i], fabs(eta[j]));
780  etaMin[i] = fmin(etaMin[i], fabs(eta[j]));
781 
782  // define dij_small in j loop
783  double x = fabs(Cx[ij]) + fabs(Cy[ij]);
784  dij_small = fmax(dij_small, x * speed);
785 
786  // update ij
787  ij += 1;
788  } // end j loop
789 
790  // define sum of entprime*flux
791  sum_entprime_flux =
792  (ith_flux_term1 * eta_prime1 + ith_flux_term2 * eta_prime2 +
793  ith_flux_term3 * eta_prime3);
794 
795  // define rescale for normalization
796  double small_rescale = g * hEps * hEps / eps;
797  double rescale = fmax(fabs(etaMax[i] - etaMin[i]) / 2., small_rescale);
798 
799  // COMPUTE ENTROPY RESIDUAL //
800  double one_over_entNormFactori = 1.0 / rescale;
801  global_entropy_residual[i] =
802  one_over_entNormFactori * fabs(entropy_flux - sum_entprime_flux);
803  if (hi <= hEps) {
804  global_entropy_residual[i] = 1.0;
805  }
806  } // end i loop
807 
808  // Finally dij_small here
809  dij_small = 1E-14 * dij_small;
810  // ********** END OF LOOP IN DOFs ********** //
811  } // end calculateEV
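Per node, the quantity stored in global_entropy_residual is the entropy-production rate normalized by the local entropy variation; dropping the regularization, it reads roughly

   R_i = | sum_j c_ij . F_eta(U_j)  -  eta'(U_i) . (flux terms accumulated above) |
         / max( |etaMax_i - etaMin_i| / 2 ,  g*hEps^2/eps ),

with R_i set to 1 at (nearly) dry nodes, and dij_small = 1e-14 * max_ij (|Cx_ij| + |Cy_ij|) * sqrt(g*hEps/eps).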
812 
813  void calculateResidual(arguments_dict &args) {
814  xt::pyarray<double> &mesh_trial_ref = args.array<double>("mesh_trial_ref");
815  xt::pyarray<double> &mesh_grad_trial_ref =
816  args.array<double>("mesh_grad_trial_ref");
817  xt::pyarray<double> &mesh_dof = args.array<double>("mesh_dof");
818  xt::pyarray<int> &mesh_l2g = args.array<int>("mesh_l2g");
819  xt::pyarray<double> &dV_ref = args.array<double>("dV_ref");
820  xt::pyarray<double> &h_trial_ref = args.array<double>("h_trial_ref");
821  xt::pyarray<double> &h_grad_trial_ref =
822  args.array<double>("h_grad_trial_ref");
823  xt::pyarray<double> &h_test_ref = args.array<double>("h_test_ref");
824  xt::pyarray<double> &h_grad_test_ref =
825  args.array<double>("h_grad_test_ref");
826  xt::pyarray<double> &vel_trial_ref = args.array<double>("vel_trial_ref");
827  xt::pyarray<double> &vel_grad_trial_ref =
828  args.array<double>("vel_grad_trial_ref");
829  xt::pyarray<double> &vel_test_ref = args.array<double>("vel_test_ref");
830  xt::pyarray<double> &vel_grad_test_ref =
831  args.array<double>("vel_grad_test_ref");
832  xt::pyarray<double> &mesh_trial_trace_ref =
833  args.array<double>("mesh_trial_trace_ref");
834  xt::pyarray<double> &mesh_grad_trial_trace_ref =
835  args.array<double>("mesh_grad_trial_trace_ref");
836  xt::pyarray<double> &h_trial_trace_ref =
837  args.array<double>("h_trial_trace_ref");
838  xt::pyarray<double> &h_grad_trial_trace_ref =
839  args.array<double>("h_grad_trial_trace_ref");
840  xt::pyarray<double> &h_test_trace_ref =
841  args.array<double>("h_test_trace_ref");
842  xt::pyarray<double> &h_grad_test_trace_ref =
843  args.array<double>("h_grad_test_trace_ref");
844  xt::pyarray<double> &vel_trial_trace_ref =
845  args.array<double>("vel_trial_trace_ref");
846  xt::pyarray<double> &vel_grad_trial_trace_ref =
847  args.array<double>("vel_grad_trial_trace_ref");
848  xt::pyarray<double> &vel_test_trace_ref =
849  args.array<double>("vel_test_trace_ref");
850  xt::pyarray<double> &vel_grad_test_trace_ref =
851  args.array<double>("vel_grad_test_trace_ref");
852  xt::pyarray<double> &normal_ref = args.array<double>("normal_ref");
853  xt::pyarray<double> &boundaryJac_ref =
854  args.array<double>("boundaryJac_ref");
855  xt::pyarray<double> &elementDiameter =
856  args.array<double>("elementDiameter");
857  int nElements_global = args.scalar<int>("nElements_global");
858  double g = args.scalar<double>("g");
859  xt::pyarray<int> &h_l2g = args.array<int>("h_l2g");
860  xt::pyarray<int> &vel_l2g = args.array<int>("vel_l2g");
861  xt::pyarray<double> &h_dof_old = args.array<double>("h_dof_old");
862  xt::pyarray<double> &hu_dof_old = args.array<double>("hu_dof_old");
863  xt::pyarray<double> &hv_dof_old = args.array<double>("hv_dof_old");
864  xt::pyarray<double> &b_dof = args.array<double>("b_dof");
865  xt::pyarray<double> &h_dof = args.array<double>("h_dof");
866  xt::pyarray<double> &hu_dof = args.array<double>("hu_dof");
867  xt::pyarray<double> &hv_dof = args.array<double>("hv_dof");
868  xt::pyarray<double> &q_cfl = args.array<double>("q_cfl");
869  xt::pyarray<int> &sdInfo_hu_hu_rowptr =
870  args.array<int>("sdInfo_hu_hu_rowptr");
871  xt::pyarray<int> &sdInfo_hu_hu_colind =
872  args.array<int>("sdInfo_hu_hu_colind");
873  xt::pyarray<int> &sdInfo_hu_hv_rowptr =
874  args.array<int>("sdInfo_hu_hv_rowptr");
875  xt::pyarray<int> &sdInfo_hu_hv_colind =
876  args.array<int>("sdInfo_hu_hv_colind");
877  xt::pyarray<int> &sdInfo_hv_hv_rowptr =
878  args.array<int>("sdInfo_hv_hv_rowptr");
879  xt::pyarray<int> &sdInfo_hv_hv_colind =
880  args.array<int>("sdInfo_hv_hv_colind");
881  xt::pyarray<int> &sdInfo_hv_hu_rowptr =
882  args.array<int>("sdInfo_hv_hu_rowptr");
883  xt::pyarray<int> &sdInfo_hv_hu_colind =
884  args.array<int>("sdInfo_hv_hu_colind");
885  int offset_h = args.scalar<int>("offset_h");
886  int offset_hu = args.scalar<int>("offset_hu");
887  int offset_hv = args.scalar<int>("offset_hv");
888  int stride_h = args.scalar<int>("stride_h");
889  int stride_hu = args.scalar<int>("stride_hu");
890  int stride_hv = args.scalar<int>("stride_hv");
891  xt::pyarray<double> &globalResidual = args.array<double>("globalResidual");
892  int nExteriorElementBoundaries_global =
893  args.scalar<int>("nExteriorElementBoundaries_global");
894  xt::pyarray<int> &exteriorElementBoundariesArray =
895  args.array<int>("exteriorElementBoundariesArray");
896  xt::pyarray<int> &elementBoundaryElementsArray =
897  args.array<int>("elementBoundaryElementsArray");
898  xt::pyarray<int> &elementBoundaryLocalElementBoundariesArray =
899  args.array<int>("elementBoundaryLocalElementBoundariesArray");
900  xt::pyarray<int> &isDOFBoundary_h = args.array<int>("isDOFBoundary_h");
901  xt::pyarray<int> &isDOFBoundary_hu = args.array<int>("isDOFBoundary_hu");
902  xt::pyarray<int> &isDOFBoundary_hv = args.array<int>("isDOFBoundary_hv");
903  xt::pyarray<int> &isAdvectiveFluxBoundary_h =
904  args.array<int>("isAdvectiveFluxBoundary_h");
905  xt::pyarray<int> &isAdvectiveFluxBoundary_hu =
906  args.array<int>("isAdvectiveFluxBoundary_hu");
907  xt::pyarray<int> &isAdvectiveFluxBoundary_hv =
908  args.array<int>("isAdvectiveFluxBoundary_hv");
909  xt::pyarray<int> &isDiffusiveFluxBoundary_hu =
910  args.array<int>("isDiffusiveFluxBoundary_hu");
911  xt::pyarray<int> &isDiffusiveFluxBoundary_hv =
912  args.array<int>("isDiffusiveFluxBoundary_hv");
913  xt::pyarray<double> &ebqe_bc_h_ext = args.array<double>("ebqe_bc_h_ext");
914  xt::pyarray<double> &ebqe_bc_flux_mass_ext =
915  args.array<double>("ebqe_bc_flux_mass_ext");
916  xt::pyarray<double> &ebqe_bc_flux_mom_hu_adv_ext =
917  args.array<double>("ebqe_bc_flux_mom_hu_adv_ext");
918  xt::pyarray<double> &ebqe_bc_flux_mom_hv_adv_ext =
919  args.array<double>("ebqe_bc_flux_mom_hv_adv_ext");
920  xt::pyarray<double> &ebqe_bc_hu_ext = args.array<double>("ebqe_bc_hu_ext");
921  xt::pyarray<double> &ebqe_bc_flux_hu_diff_ext =
922  args.array<double>("ebqe_bc_flux_hu_diff_ext");
923  xt::pyarray<double> &ebqe_penalty_ext =
924  args.array<double>("ebqe_penalty_ext");
925  xt::pyarray<double> &ebqe_bc_hv_ext = args.array<double>("ebqe_bc_hv_ext");
926  xt::pyarray<double> &ebqe_bc_flux_hv_diff_ext =
927  args.array<double>("ebqe_bc_flux_hv_diff_ext");
928  xt::pyarray<double> &q_velocity = args.array<double>("q_velocity");
929  xt::pyarray<double> &ebqe_velocity = args.array<double>("ebqe_velocity");
930  xt::pyarray<double> &flux = args.array<double>("flux");
931  xt::pyarray<double> &elementResidual_h =
932  args.array<double>("elementResidual_h");
933  xt::pyarray<double> &Cx = args.array<double>("Cx");
934  xt::pyarray<double> &Cy = args.array<double>("Cy");
935  xt::pyarray<double> &CTx = args.array<double>("CTx");
936  xt::pyarray<double> &CTy = args.array<double>("CTy");
937  int numDOFsPerEqn = args.scalar<int>("numDOFsPerEqn");
938  int NNZ = args.scalar<int>("NNZ");
939  xt::pyarray<int> &csrRowIndeces_DofLoops =
940  args.array<int>("csrRowIndeces_DofLoops");
941  xt::pyarray<int> &csrColumnOffsets_DofLoops =
942  args.array<int>("csrColumnOffsets_DofLoops");
943  xt::pyarray<double> &lumped_mass_matrix =
944  args.array<double>("lumped_mass_matrix");
945  double cfl_run = args.scalar<double>("cfl_run");
946  double eps = args.scalar<double>("eps");
947  double hEps = args.scalar<double>("hEps");
948  xt::pyarray<double> &hnp1_at_quad_point =
949  args.array<double>("hnp1_at_quad_point");
950  xt::pyarray<double> &hunp1_at_quad_point =
951  args.array<double>("hunp1_at_quad_point");
952  xt::pyarray<double> &hvnp1_at_quad_point =
953  args.array<double>("hvnp1_at_quad_point");
954  xt::pyarray<double> &extendedSourceTerm_hu =
955  args.array<double>("extendedSourceTerm_hu");
956  xt::pyarray<double> &extendedSourceTerm_hv =
957  args.array<double>("extendedSourceTerm_hv");
958  xt::pyarray<double> &dH_minus_dL = args.array<double>("dH_minus_dL");
959  xt::pyarray<double> &muH_minus_muL = args.array<double>("muH_minus_muL");
960  double cE = args.scalar<double>("cE");
961  int LUMPED_MASS_MATRIX = args.scalar<int>("LUMPED_MASS_MATRIX");
962  double dt = args.scalar<double>("dt");
963  int LINEAR_FRICTION = args.scalar<int>("LINEAR_FRICTION");
964  double mannings = args.scalar<double>("mannings");
965  xt::pyarray<double> &quantDOFs = args.array<double>("quantDOFs");
966  int SECOND_CALL_CALCULATE_RESIDUAL =
967  args.scalar<int>("SECOND_CALL_CALCULATE_RESIDUAL");
968  int COMPUTE_NORMALS = args.scalar<int>("COMPUTE_NORMALS");
969  xt::pyarray<double> &normalx = args.array<double>("normalx");
970  xt::pyarray<double> &normaly = args.array<double>("normaly");
971  xt::pyarray<double> &dLow = args.array<double>("dLow");
972  int lstage = args.scalar<int>("lstage");
973  xt::pyarray<double> &new_SourceTerm_hu =
974  args.array<double>("new_SourceTerm_hu");
975  xt::pyarray<double> &new_SourceTerm_hv =
976  args.array<double>("new_SourceTerm_hv");
977  xt::pyarray<double> &global_entropy_residual =
978  args.array<double>("global_entropy_residual");
979  double dij_small = args.scalar<double>("dij_small");
980  xt::pyarray<double> &hLow = args.array<double>("hLow");
981  xt::pyarray<double> &huLow = args.array<double>("huLow");
982  xt::pyarray<double> &hvLow = args.array<double>("hvLow");
983  xt::pyarray<double> &h_min = args.array<double>("h_min");
984  xt::pyarray<double> &h_max = args.array<double>("h_max");
985  xt::pyarray<double> &kin_max = args.array<double>("kin_max");
986  xt::pyarray<double> &urelax = args.array<double>("urelax");
987  xt::pyarray<double> &drelax = args.array<double>("drelax");
988  // FOR FRICTION//
989  double n2 = std::pow(mannings, 2.);
990  double gamma = 4. / 3;
991  double xi = 10.;
992 
994  // ********** CELL LOOPS ********** //
996  // To compute:
997  // * Time derivative term
998  // * Cell based CFL
999  // * Velocity and soln at quad points (for other models)
1000  for (int eN = 0; eN < nElements_global; eN++) {
1001  // declare local storage for element residual and initialize
1002  register double elementResidual_h[nDOF_test_element],
1003  elementResidual_hu[nDOF_test_element],
1004  elementResidual_hv[nDOF_test_element];
1005 
1006  for (int i = 0; i < nDOF_test_element; i++) {
1007  elementResidual_h[i] = 0.0;
1008  elementResidual_hu[i] = 0.0;
1009  elementResidual_hv[i] = 0.0;
1010  }
1011  //
1012  // loop over quadrature points and compute integrands
1013  //
1014  for (int k = 0; k < nQuadraturePoints_element; k++) {
1015  // compute indices and declare local storage
1016  register int eN_k = eN * nQuadraturePoints_element + k,
1017  eN_k_nSpace = eN_k * nSpace,
1018  eN_nDOF_trial_element = eN * nDOF_trial_element;
1019  register double h = 0.0, hu = 0.0,
1020  hv = 0.0, // solution at current time
1021  h_old = 0.0, hu_old = 0.0, hv_old = 0.0, // solution at lstage
1022  jac[nSpace * nSpace], jacDet, jacInv[nSpace * nSpace],
1023  h_test_dV[nDOF_trial_element], dV, x, y, xt, yt;
1024  // get jacobian, etc for mapping reference element
1025  ck.calculateMapping_element(
1026  eN, k, mesh_dof.data(), mesh_l2g.data(), mesh_trial_ref.data(),
1027  mesh_grad_trial_ref.data(), jac, jacDet, jacInv, x, y);
1028  // get the physical integration weight
1029  dV = fabs(jacDet) * dV_ref[k];
1030  // get the solution at current time
1031  ck.valFromDOF(h_dof.data(), &h_l2g.data()[eN_nDOF_trial_element],
1032  &h_trial_ref.data()[k * nDOF_trial_element], h);
1033  ck.valFromDOF(hu_dof.data(), &vel_l2g[eN_nDOF_trial_element],
1034  &vel_trial_ref.data()[k * nDOF_trial_element], hu);
1035  ck.valFromDOF(hv_dof.data(), &vel_l2g[eN_nDOF_trial_element],
1036  &vel_trial_ref.data()[k * nDOF_trial_element], hv);
1037  // get the solution at the lstage
1038  ck.valFromDOF(h_dof_old.data(), &h_l2g.data()[eN_nDOF_trial_element],
1039  &h_trial_ref.data()[k * nDOF_trial_element], h_old);
1040  ck.valFromDOF(hu_dof_old.data(), &vel_l2g.data()[eN_nDOF_trial_element],
1041  &vel_trial_ref.data()[k * nDOF_trial_element], hu_old);
1042  ck.valFromDOF(hv_dof_old.data(), &vel_l2g.data()[eN_nDOF_trial_element],
1043  &vel_trial_ref.data()[k * nDOF_trial_element], hv_old);
1044  // calculate cell based CFL to keep a reference
1045  calculateCFL(elementDiameter.data()[eN], g, h_old, hu_old, hv_old, hEps,
1046  q_cfl[eN_k]);
1047  // precalculate test function products with integration weights
1048  for (int j = 0; j < nDOF_trial_element; j++)
1049  h_test_dV[j] = h_test_ref[k * nDOF_trial_element + j] * dV;
1050 
1051  // SAVE VELOCITY // at quadrature points for other models to use
1052  q_velocity[eN_k_nSpace + 0] =
1053  2 * h / (h * h + std::pow(fmax(h, hEps), 2)) * hu;
1054  q_velocity[eN_k_nSpace + 1] =
1055  2 * h / (h * h + std::pow(fmax(h, hEps), 2)) * hv;
1056  hnp1_at_quad_point[eN_k] = h;
1057  hunp1_at_quad_point[eN_k] = hu;
1058  hvnp1_at_quad_point[eN_k] = hv;
1059 
1060  for (int i = 0; i < nDOF_test_element; i++) {
1061  // compute time derivative part of global residual. NOTE: no lumping
1062  elementResidual_h[i] += (h - h_old) * h_test_dV[i];
1063  elementResidual_hu[i] += (hu - hu_old) * h_test_dV[i];
1064  elementResidual_hv[i] += (hv - hv_old) * h_test_dV[i];
1065  }
1066  }
1067  // distribute
1068  for (int i = 0; i < nDOF_test_element; i++) {
1069  register int eN_i = eN * nDOF_test_element + i;
1070 
1071  // global i-th index for h (this is same for vel_l2g)
1072  int h_gi = h_l2g[eN_i];
1073 
1074  // distribute time derivative to global residual
1075  globalResidual[offset_h + stride_h * h_gi] += elementResidual_h[i];
1076  globalResidual[offset_hu + stride_hu * h_gi] += elementResidual_hu[i];
1077  globalResidual[offset_hv + stride_hv * h_gi] += elementResidual_hv[i];
1078  }
1079  }
1080  // ********** END OF CELL LOOPS ********** //
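Only the time-derivative term is integrated element by element here (along with the cell-based CFL and the quadrature-point velocities saved for other models); the rest of the residual is assembled edge-wise over the sparsity graph below. Per test function the element contribution is simply

   elementResidual_U[i] = sum_k ( U^{n+1}(x_k) - U^n(x_k) ) * phi_i(x_k) * |J(x_k)| * w_k,   U in {h, hu, hv}.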
1081 
1082  if (SECOND_CALL_CALCULATE_RESIDUAL == 0) // This is to save some time
1083  {
1085  // ********** FIRST SET OF LOOPS ON DOFs ********** //
1087  // To compute:
1088  // * Local bounds for limiting
1089  // * Low order solution (in terms of bar states)
1090 
1091  // Here we declare some arrays for local bounds
1092  std::valarray<double> delta_Sqd_h(0.0, numDOFsPerEqn),
1093  bar_deltaSqd_h(0.0, numDOFsPerEqn), delta_Sqd_kin(0.0, numDOFsPerEqn),
1094  bar_deltaSqd_kin(0.0, numDOFsPerEqn);
1095  xt::pyarray<double> kin(numDOFsPerEqn), max_of_h_and_hEps(numDOFsPerEqn);
1096 
1097  // Define kinetic energy, kin = 1/2 q^2 / h
1098  max_of_h_and_hEps = xt::where(h_dof_old > hEps, h_dof_old, hEps);
1099  kin = 0.5 * (hu_dof_old * hu_dof_old + hv_dof_old * hv_dof_old);
1100  kin *= 2.0 * h_dof_old /
1101  (h_dof_old * h_dof_old + max_of_h_and_hEps * max_of_h_and_hEps);
1102 
1103  /* First loop to define: delta_Sqd_h, delta_Sqd_kin */
1104  for (int i = 0; i < numDOFsPerEqn; i++) {
1105 
1106  for (int offset = csrRowIndeces_DofLoops[i];
1107  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
1108 
1109  int j = csrColumnOffsets_DofLoops[offset];
1110 
1111  if (i != j) {
1112  delta_Sqd_h[i] += h_dof_old[i] - h_dof_old[j];
1113  delta_Sqd_kin[i] += kin[i] - kin[j];
1114  }
1115  } // j loop ends here
1116  } // i loop ends here
1117 
1118  // Stuff for bar states here (BT = BarTilde)
1119  std::valarray<double> hBT(0.0, dH_minus_dL.size()),
1120  huBT(0.0, dH_minus_dL.size()), hvBT(0.0, dH_minus_dL.size());
1121 
1122  /* Second loop to compute bar states (variable)BT */
1123  int ij = 0;
1124  for (int i = 0; i < numDOFsPerEqn; i++) {
1125 
1126  // define things at ith node
1127  double hi = h_dof_old[i];
1128  double hui = hu_dof_old[i];
1129  double hvi = hv_dof_old[i];
1130  double Zi = b_dof[i];
1131  double mi = lumped_mass_matrix[i];
1132  double one_over_hiReg =
1133  2.0 * hi / (hi * hi + std::pow(fmax(hi, hEps), 2));
1134  double ui = hui * one_over_hiReg;
1135  double vi = hvi * one_over_hiReg;
1136 
1137  // define full pressure at ith node for definition of bar states
1138  double pressure_i = 0.5 * g * hi * hi;
1139 
1140  // loop over the sparsity pattern of the i-th DOF
1141  for (int offset = csrRowIndeces_DofLoops[i];
1142  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
1143  int j = csrColumnOffsets_DofLoops[offset];
1144 
1145  // define things at jth node
1146  double hj = h_dof_old[j];
1147  double huj = hu_dof_old[j];
1148  double hvj = hv_dof_old[j];
1149  double Zj = b_dof[j];
1150  double one_over_hjReg =
1151  2.0 * hj / (hj * hj + std::pow(fmax(hj, hEps), 2));
1152  double uj = huj * one_over_hjReg;
1153  double vj = hvj * one_over_hjReg;
1154 
1155  // for bar_deltaSqd_h, bar_deltaSqd_heta, bar_deltaSqd_kin
1156  double muLowij = 0., muLij = 0., dLowij = 0., dLij = 0.;
1157 
1158  if (i != j) {
1159  // Put these computations first before it gets messy
1160  bar_deltaSqd_h[i] += 0.5 * delta_Sqd_h[j] + 0.5 * delta_Sqd_h[i];
1161  bar_deltaSqd_kin[i] +=
1162  0.5 * delta_Sqd_kin[j] + 0.5 * delta_Sqd_kin[i];
1163 
1164  if (lstage == 0)
1165  dLowij = dLow[ij];
1166  else {
1167  double cij_norm = sqrt(Cx[ij] * Cx[ij] + Cy[ij] * Cy[ij]);
1168  double cji_norm = sqrt(CTx[ij] * CTx[ij] + CTy[ij] * CTy[ij]);
1169  double nxij = Cx[ij] / cij_norm, nyij = Cy[ij] / cij_norm;
1170  double nxji = CTx[ij] / cji_norm, nyji = CTy[ij] / cji_norm;
1171  dLowij = fmax(
1172  maxWaveSpeedSharpInitialGuess(g, nxij, nyij, hi, hui, hvi, hj,
1173  huj, hvj, hEps, false) *
1174  cij_norm,
1175  maxWaveSpeedSharpInitialGuess(g, nxji, nyji, hj, huj, hvj, hi,
1176  hui, hvi, hEps, false) *
1177  cji_norm);
1178  }
1179  // save dLij
1180  dLij = dLowij;
1181 
1182  // compute muij
1183  muLij = fmax(fmax(0., -(ui * Cx[ij] + vi * Cy[ij])),
1184  fmax(0., (uj * Cx[ij] + vj * Cy[ij])));
1185 
1186  // Define dLij as max of dLij and muLij
1187  dLij = fmax(dLowij, muLij);
1188  dLow[ij] = fmax(dLij, muLij);
1189 
1191  // COMPUTE BAR STATES //
1193 
1194  // define pressure at jth node for bar states
1195  double pressure_j = 0.5 * g * hj * hj;
1196 
1197  // Compute star states
1198  double hStarij = fmax(0., hi + Zi - fmax(Zi, Zj));
1199  double huStarij = hui * hStarij * one_over_hiReg;
1200  double hvStarij = hvi * hStarij * one_over_hiReg;
1201  double hStarji = fmax(0., hj + Zj - fmax(Zi, Zj));
1202  double huStarji = huj * hStarji * one_over_hjReg;
1203  double hvStarji = hvj * hStarji * one_over_hjReg;
1204 
1205  double hBar_ij = 0., hTilde_ij = 0., huBar_ij = 0., huTilde_ij = 0.,
1206  hvBar_ij = 0., hvTilde_ij = 0.;
1207 
1208  // h component
1209  hBar_ij = -1. / (2.0 * fmax(dLij, dij_small)) *
1210  ((uj * hj - ui * hi) * Cx[ij] +
1211  (vj * hj - vi * hi) * Cy[ij]) +
1212  0.5 * (hj + hi);
1213  hTilde_ij = (dLij - muLij) / (2.0 * fmax(dLij, dij_small)) *
1214  (hStarji - hj - (hStarij - hi));
1215  // hu component
1216  huBar_ij =
1217  -1. / (2.0 * fmax(dLij, dij_small)) *
1218  ((uj * huj - ui * hui + pressure_j - pressure_i) * Cx[ij] +
1219  (vj * huj - vi * hui) * Cy[ij]) +
1220  0.5 * (huj + hui);
1221  huTilde_ij = (dLij - muLij) / (2.0 * fmax(dLij, dij_small)) *
1222  (huStarji - huj - (huStarij - hui));
1223  // hv component
1224  hvBar_ij =
1225  -1. / (2.0 * fmax(dLij, dij_small)) *
1226  ((uj * hvj - ui * hvi) * Cx[ij] +
1227  (vj * hvj - vi * hvi + pressure_j - pressure_i) * Cy[ij]) +
1228  0.5 * (hvj + hvi);
1229  hvTilde_ij = (dLij - muLij) / (2.0 * fmax(dLij, dij_small)) *
1230  (hvStarji - hvj - (hvStarij - hvi));
1231 
1232  // Here we define uBar + uTilde
1233  hBT[ij] = hBar_ij + hTilde_ij;
1234  huBT[ij] = huBar_ij + huTilde_ij;
1235  hvBT[ij] = hvBar_ij + hvTilde_ij;
1236  } else {
1237  // i==j
1238  // Bar states by definition satisfy Utilde_ii + Ubar_ii = U_i
1239  hBT[ij] = hi;
1240  huBT[ij] = hui;
1241  hvBT[ij] = hvi;
1242  }
1243 
1244  // UPDATE ij //
1245  ij += 1;
1246  } // j loop ends here
1247 
1248  // for bar_deltaSqd_h, bar_deltaSqd_heta, bar_deltaSqd_kin
1249  bar_deltaSqd_h[i] =
1250  bar_deltaSqd_h[i] /
1251  (csrRowIndeces_DofLoops[i + 1] - csrRowIndeces_DofLoops[i] - 1) /
1252  2.0;
1253  bar_deltaSqd_kin[i] =
1254  bar_deltaSqd_kin[i] /
1255  (csrRowIndeces_DofLoops[i + 1] - csrRowIndeces_DofLoops[i] - 1) /
1256  2.0;
1257 
1258  } // i loop ends here
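Up to the regularized divisions, the bar/tilde states assembled in this loop have the standard bar-state form of graph-viscosity schemes; for the water height, for example,

   hBar_ij   = (h_i + h_j)/2  -  ( F_h(U_j) - F_h(U_i) ) . c_ij / (2 * max(dL_ij, dij_small))
   hTilde_ij = (dL_ij - muL_ij) / (2 * max(dL_ij, dij_small)) * [ (h*_ji - h_j) - (h*_ij - h_i) ]
   hBT_ij    = hBar_ij + hTilde_ij,        with hBT_ii = h_i,

and analogously for hu and hv, with the pressure 1/2*g*h^2 added to the corresponding flux component.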
1259 
1260  /* Then final loop of first set to get local bounds */
1261  ij = 0;
1262  for (int i = 0; i < numDOFsPerEqn; i++) {
1263 
1264  // define m_i
1265  double mi = lumped_mass_matrix[i];
1266 
1267  /* Initialize hmin, hmax */
1268  h_min[i] = h_dof_old[i];
1269  h_max[i] = h_dof_old[i];
1270 
1271  /* Initialize low order solution */
1272  hLow[i] = h_dof_old[i];
1273  huLow[i] = hu_dof_old[i];
1274  hvLow[i] = hv_dof_old[i];
1275  kin_max[i] = kin[i];
1276 
1277  // loop in j (sparsity pattern)
1278  for (int offset = csrRowIndeces_DofLoops[i];
1279  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
1280 
1281  int j = csrColumnOffsets_DofLoops[offset];
1282 
1283  double one_over_hBT =
1284  2.0 * hBT[ij] /
1285  (hBT[ij] * hBT[ij] + std::pow(fmax(hBT[ij], hEps), 2));
1286  double psi_ij = one_over_hBT *
1287  (huBT[ij] * huBT[ij] + hvBT[ij] * hvBT[ij]) /
1288  2.0; // Eqn (6.31)
1289 
1290  // COMPUTE LOCAL BOUNDS //
1291  kin_max[i] = fmax(psi_ij, kin_max[i]);
1292  h_min[i] = std::min(h_min[i], hBT[ij]);
1293  h_max[i] = std::max(h_max[i], hBT[ij]);
1294 
1295  /* COMPUTE LOW ORDER SOLUTION. See EQN 6.23 in SW friction paper */
1296  // This is low order solution WITHOUT sources
1297  if (i != j) {
1298  hLow[i] += h_dof_old[i] * (-dt / mi * 2 * dLow[ij]) +
1299  dt / mi * (2 * dLow[ij] * hBT[ij]);
1300  huLow[i] += hu_dof_old[i] * (-dt / mi * 2 * dLow[ij]) +
1301  dt / mi * (2 * dLow[ij] * huBT[ij]);
1302  hvLow[i] += hv_dof_old[i] * (-dt / mi * 2 * dLow[ij]) +
1303  dt / mi * (2 * dLow[ij] * hvBT[ij]);
1304  }
1305 
1306  // UPDATE ij //
1307  ij += 1;
1308  } // j loop ends here
1309 
1310  // Then relax the bounds; see the convex limiting paper for the
1311  // rationale behind urelax/drelax and the bar_deltaSqd terms
1312  kin_max[i] = std::min(urelax[i] * kin_max[i],
1313  kin_max[i] + std::abs(bar_deltaSqd_kin[i]));
1314  h_min[i] = std::max(drelax[i] * h_min[i],
1315  h_min[i] - std::abs(bar_deltaSqd_h[i]));
1316  h_max[i] = std::min(urelax[i] * h_max[i],
1317  h_max[i] + std::abs(bar_deltaSqd_h[i]));
1318 
1319  // clean up hLow from round off error
1320  if (hLow[i] < hEps)
1321  hLow[i] = 0.0;
1322  } // i loop ends here
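With the bar states in hand, the low-order update and the limiter bounds take the usual convex-combination form (the friction and topography sources are added later, inside convexLimiting):

   U_i^low  = U_i^n + (dt/m_i) * sum_{j != i} 2*dLow_ij * (UBT_ij - U_i^n),     U in {h, hu, hv}
   h_min_i <= hBT_ij <= h_max_i   (before relaxation),
   kin_max_i >= psi_ij = ( huBT_ij^2 + hvBT_ij^2 ) / ( 2 * hBT_ij ).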
1323 
1325  // ********** Second set of loops on dofs ********** //
1327  // To compute:
1328  // * Hyperbolic part of the flux
1329  // * Extended source terms
1330  // * Smoothness indicator
1331 
1332  ij = 0;
1333  std::valarray<double> hyp_flux_h(numDOFsPerEqn),
1334  hyp_flux_hu(numDOFsPerEqn), hyp_flux_hv(numDOFsPerEqn),
1335  psi(numDOFsPerEqn), etaMax(numDOFsPerEqn), etaMin(numDOFsPerEqn);
1336 
1337  for (int i = 0; i < numDOFsPerEqn; i++) {
1338  // solution at time tn for the ith DOF
1339  double hi = h_dof_old[i];
1340  double hui = hu_dof_old[i];
1341  double hvi = hv_dof_old[i];
1342  double Zi = b_dof[i];
1343  // Define some things using above
1344  double one_over_hiReg =
1345  2 * hi / (hi * hi + std::pow(fmax(hi, hEps), 2)); // hEps
1346  double ui = hui * one_over_hiReg;
1347  double vi = hvi * one_over_hiReg;
1348  double mi = lumped_mass_matrix[i];
1349 
1350  /* COMPUTE EXTENDED SOURCE TERMS for all equations:
1351  * Friction terms
1352  * NOTE: Be careful with sign of source terms. Extended sources are on
1353  * left side of equations. "new_SourceTerm" variables are on right
1354  */
1355 
1356  // FRICTION
1357  if (LINEAR_FRICTION == 1) {
1358  extendedSourceTerm_hu[i] = mannings * hui * mi;
1359  extendedSourceTerm_hv[i] = mannings * hvi * mi;
1360  // For use in the convex limiting function
1361  // (not strictly necessary, but it keeps the signs consistent)
1362  new_SourceTerm_hu[i] = -mannings * hui * mi;
1363  new_SourceTerm_hv[i] = -mannings * hvi * mi;
1364  } else {
1365  double veli_norm = std::sqrt(ui * ui + vi * vi);
1366  double hi_to_the_gamma = std::pow(fmax(hi, hEps), gamma);
1367  double friction_aux =
1368  veli_norm == 0.
1369  ? 0.
1370  : (2 * g * n2 * veli_norm * mi /
1371  (hi_to_the_gamma +
1372  fmax(hi_to_the_gamma, xi * g * n2 * dt * veli_norm)));
1373  extendedSourceTerm_hu[i] = friction_aux * hui;
1374  extendedSourceTerm_hv[i] = friction_aux * hvi;
1375  // For use in the convex limiting function
1376  new_SourceTerm_hu[i] = -friction_aux * hui;
1377  new_SourceTerm_hv[i] = -friction_aux * hvi;
1378  }
1379 
1380  /* HYPERBOLIC FLUXES */
1381  hyp_flux_h[i] = 0;
1382  hyp_flux_hu[i] = 0;
1383  hyp_flux_hv[i] = 0;
1384 
1385  // FOR SMOOTHNESS INDICATOR //
1386  double alphai;
1387  double alpha_numerator = 0;
1388  double alpha_denominator = 0;
1389  double alpha_zero = 0.5; // if only want smoothness
1390  double alpha_factor = 1.0 / (1.0 - alpha_zero);
1391 
1392  // loop in j (sparsity pattern)
1393  for (int offset = csrRowIndeces_DofLoops[i];
1394  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
1395 
1396  int j = csrColumnOffsets_DofLoops[offset];
1397 
1398  // solution at time tn for the jth DOF
1399  double hj = h_dof_old[j];
1400  double huj = hu_dof_old[j];
1401  double hvj = hv_dof_old[j];
1402  double Zj = b_dof[j];
1403  // Then define some things here using above
1404  double one_over_hjReg =
1405  2.0 * hj / (hj * hj + std::pow(fmax(hj, hEps), 2));
1406  double uj = huj * one_over_hjReg;
1407  double vj = hvj * one_over_hjReg;
1408 
1409  // auxiliary functions to compute fluxes
1410  double aux_h =
1411  (uj * hj - ui * hi) * Cx[ij] + (vj * hj - vi * hi) * Cy[ij];
1412  double aux_hu =
1413  (uj * huj - ui * hui) * Cx[ij] + (vj * huj - vi * hui) * Cy[ij];
1414  double aux_hv =
1415  (uj * hvj - ui * hvi) * Cx[ij] + (vj * hvj - vi * hvi) * Cy[ij];
1416 
1417  /* HYPERBOLIC FLUX */
1418  hyp_flux_h[i] += aux_h;
1419  hyp_flux_hu[i] += aux_hu;
1420  hyp_flux_hv[i] += aux_hv;
1421 
1422  // EXTENDED SOURCE, USING 6.13 //
1423  extendedSourceTerm_hu[i] += g * hi * (hj + Zj) * Cx[ij];
1424  extendedSourceTerm_hv[i] += g * hi * (hj + Zj) * Cy[ij];
1425 
1426  new_SourceTerm_hu[i] +=
1427  g * (-hi * (Zj - Zi) + 0.5 * std::pow(hj - hi, 2)) * Cx[ij];
1428  new_SourceTerm_hv[i] +=
1429  g * (-hi * (Zj - Zi) + 0.5 * std::pow(hj - hi, 2)) * Cy[ij];
1430 
1431  // FOR SMOOTHNESS INDICATOR //
1432  alpha_numerator += hj - hi;
1433  alpha_denominator += fabs(hj - hi);
1434 
1435  // update ij
1436  ij += 1;
1437  } // end j loop
1438 
1439  // COMPUTE SMOOTHNESS INDICATOR //
1440  if (hi <= hEps) {
1441  alphai = 1.0;
1442  psi[i] = 1.0;
1443  } else {
1444  // Force alphai=0 in constant states
1445  if (fabs(alpha_numerator) <= hEps) {
1446  alphai = 0.;
1447  } else {
1448  alphai =
1449  (fabs(alpha_numerator) - hEps) / fabs(alpha_denominator - hEps);
1450  }
1451  alphai = fmax(alphai - alpha_zero, 0.0) * alpha_factor;
1452  if (POWER_SMOOTHNESS_INDICATOR == 0)
1453  psi[i] = 1.0;
1454  else
1455  psi[i] = std::pow(alphai, POWER_SMOOTHNESS_INDICATOR);
1456  }
1457  }
1458  // ********** END OF 2nd LOOP ON DOFS ********** //
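The smoothness indicator built in this loop measures how one-signed the local height increments are: a constant or smooth (locally linear) field gives alpha_i near 0 (hence psi_i near 0), while a local extremum gives alpha_i near 1; dry nodes are forced to psi_i = 1. In short,

   alpha_i = ( |sum_j (h_j - h_i)| - hEps ) / | sum_j |h_j - h_i| - hEps |      (0 in constant states)
   alpha_i = max(alpha_i - 1/2, 0) * 2,        psi_i = alpha_i ^ POWER_SMOOTHNESS_INDICATOR.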
1459 
1461  // ********** MAIN LOOP ON DOFs **********
1462  // To compute:
1463  // * dissipative terms
1464  // * bar states
1466 
1467  ij = 0;
1468  for (int i = 0; i < numDOFsPerEqn; i++) {
1469  double hi = h_dof_old[i];
1470  double hui = hu_dof_old[i];
1471  double hvi = hv_dof_old[i];
1472  double Zi = b_dof[i];
1473  double mi = lumped_mass_matrix[i];
1474 
1475  double one_over_hiReg =
1476  2.0 * hi / (hi * hi + std::pow(fmax(hi, hEps), 2));
1477  double ui = hui * one_over_hiReg;
1478  double vi = hvi * one_over_hiReg;
1479 
1480  // Define full pressure at ith node for definition of bar states below
1481  double pressure_i = 0.5 * g * hi * hi;
1482 
1483  // HIGH ORDER DISSIPATIVE TERMS, for Aij matrix
1484  double ith_dHij_minus_muHij_times_hStarStates = 0.,
1485  ith_dHij_minus_muHij_times_huStarStates = 0.,
1486  ith_dHij_minus_muHij_times_hvStarStates = 0.,
1487  ith_muHij_times_hStates = 0., ith_muHij_times_huStates = 0.,
1488  ith_muHij_times_hvStates = 0.;
1489 
1490  // loop over the sparsity pattern of the i-th DOF
1491  for (int offset = csrRowIndeces_DofLoops[i];
1492  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
1493  int j = csrColumnOffsets_DofLoops[offset];
1494  double hj = h_dof_old[j];
1495  double huj = hu_dof_old[j];
1496  double hvj = hv_dof_old[j];
1497  double Zj = b_dof[j];
1498  double one_over_hjReg =
1499  2.0 * hj / (hj * hj + std::pow(fmax(hj, hEps), 2));
1500  double uj = huj * one_over_hjReg;
1501  double vj = hvj * one_over_hjReg;
1502 
1503  // define pressure at jth node
1504  double pressure_j = 0.5 * g * hj * hj;
1505 
1506  // COMPUTE STAR STATES
1507  double hStarij = fmax(0., hi + Zi - fmax(Zi, Zj));
1508  double huStarij = hui * hStarij * one_over_hiReg;
1509  double hvStarij = hvi * hStarij * one_over_hiReg;
1510 
1511  double hStarji = fmax(0., hj + Zj - fmax(Zi, Zj));
1512  double huStarji = huj * hStarji * one_over_hjReg;
1513  double hvStarji = hvj * hStarji * one_over_hjReg;
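  // These are hydrostatic-reconstruction star states,
  //   hStar_ij = max(0, h_i + Z_i - max(Z_i, Z_j)),
  //   (hu,hv)Star_ij ~ (hu,hv)_i * hStar_ij / h_i,
  // the usual ingredient for well balancing across discontinuous bathymetry.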
1514 
1515  // Dissipative well balancing term
1516  double muLowij = 0., muLij = 0., muHij = 0.;
1517  double dLowij = 0., dLij = 0., dHij = 0.;
 1518  if (i != j) // This check is not strictly necessary: the i == j
 1519  // contribution to the ith dissipative terms below vanishes
1520  {
1522  // DISSIPATIVE MATRIX //
1524  if (lstage == 0)
1525  dLowij = dLow[ij];
1526  else {
1527  double cij_norm = sqrt(Cx[ij] * Cx[ij] + Cy[ij] * Cy[ij]);
1528  double cji_norm = sqrt(CTx[ij] * CTx[ij] + CTy[ij] * CTy[ij]);
1529  double nxij = Cx[ij] / cij_norm, nyij = Cy[ij] / cij_norm;
1530  double nxji = CTx[ij] / cji_norm, nyji = CTy[ij] / cji_norm;
1531  dLowij = fmax(
1532  maxWaveSpeedSharpInitialGuess(g, nxij, nyij, hi, hui, hvi, hj,
1533  huj, hvj, hEps, false) *
1534  cij_norm,
1535  maxWaveSpeedSharpInitialGuess(g, nxji, nyji, hj, huj, hvj, hi,
1536  hui, hvi, hEps, false) *
1537  cji_norm);
1538  }
 1539  // This is the standard low-order dij; one can instead use
 1540  // dLij = dLowij*fmax(psi[i],psi[j]) for a smoothness-based low-order viscosity
1541  dLij = dLowij;
1542 
1544  // WELL BALANCING DISSIPATIVE MATRIX //
1546  muLowij = fmax(fmax(0., -(ui * Cx[ij] + vi * Cy[ij])),
1547  fmax(0., (uj * Cx[ij] + vj * Cy[ij])));
1548  muLij = muLowij;
1549 
 1550  // Define dLij from the low-order graph viscosities
1552  dLij = fmax(dLowij, muLij);
1553 
 1554  // Save the combined low-order viscosity in dLow for the limiting step (note: dLow is reused here)
1555  dLow[ij] = fmax(dLij, muLij);
1556 
1558  // ENTROPY VISCOSITY //
1560  double dEVij = cE * fmax(global_entropy_residual[i],
1561  global_entropy_residual[j]);
1562  dHij = fmin(dLowij, dEVij);
1563  muHij = fmin(muLowij, dEVij);
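  // High-order graph viscosities: the low-order dLowij / muLowij are capped by
  // the entropy-viscosity value dEVij = cE * max(N_i, N_j), where N is the
  // global entropy residual (presumably filled by calculateEV).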
1564 
1565  // compute dij_minus_muij times star solution terms
1566  // see: eqn (6.13)
1567  ith_dHij_minus_muHij_times_hStarStates +=
1568  (dHij - muHij) * (hStarji - hStarij);
1569  ith_dHij_minus_muHij_times_huStarStates +=
1570  (dHij - muHij) * (huStarji - huStarij);
1571  ith_dHij_minus_muHij_times_hvStarStates +=
1572  (dHij - muHij) * (hvStarji - hvStarij);
1573 
1574  // compute muij times solution terms
1575  ith_muHij_times_hStates += muHij * (hj - hi);
1576  ith_muHij_times_huStates += muHij * (huj - hui);
1577  ith_muHij_times_hvStates += muHij * (hvj - hvi);
1578 
1579  // compute dH_minus_dL
1580  dH_minus_dL[ij] = dHij - dLij;
1581  muH_minus_muL[ij] = muHij - muLij;
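  // dH_minus_dL and muH_minus_muL are stored per edge here and consumed later
  // by the convex limiting pass (see convexLimiting).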
1582  } else // i==j
1583  {
 1584  dH_minus_dL[ij] =
 1585  0.; // Not the true value, but it only multiplies (Uj - Ui), which is zero here
 1586  muH_minus_muL[ij] =
 1587  0.; // Same remark as above
1588  }
1589  // update ij
1590  ij += 1;
1591  } // j loop ends here
1592 
1593  /* Define global residual */
1594  if (LUMPED_MASS_MATRIX == 1) {
1595  globalResidual[offset_h + stride_h * i] =
1596  hi - dt / mi *
1597  (hyp_flux_h[i] - ith_dHij_minus_muHij_times_hStarStates -
1598  ith_muHij_times_hStates);
1599  globalResidual[offset_hu + stride_hu * i] =
1600  hui - dt / mi *
1601  ((hyp_flux_hu[i] + extendedSourceTerm_hu[i]) -
1602  ith_dHij_minus_muHij_times_huStarStates -
1603  ith_muHij_times_huStates);
1604  globalResidual[offset_hv + stride_hv * i] =
1605  hvi - dt / mi *
1606  ((hyp_flux_hv[i] + extendedSourceTerm_hv[i]) -
1607  ith_dHij_minus_muHij_times_hvStarStates -
1608  ith_muHij_times_hvStates);
1609  // clean up potential negative water height due to machine precision
1610  if (globalResidual[offset_h + stride_h * i] >= -hEps &&
1611  globalResidual[offset_h + stride_h * i] < hEps)
1612  globalResidual[offset_h + stride_h * i] = 0;
1613  } else {
1614  // Distribute residual
1615  // NOTE: MASS MATRIX IS CONSISTENT
1616  globalResidual[offset_h + stride_h * i] +=
1617  dt * (hyp_flux_h[i] - ith_dHij_minus_muHij_times_hStarStates -
1618  ith_muHij_times_hStates);
1619  globalResidual[offset_hu + stride_hu * i] +=
1620  dt * (hyp_flux_hu[i] + extendedSourceTerm_hu[i] -
1621  ith_dHij_minus_muHij_times_huStarStates -
1622  ith_muHij_times_huStates);
1623  globalResidual[offset_hv + stride_hv * i] +=
1624  dt * (hyp_flux_hv[i] + extendedSourceTerm_hv[i] -
1625  ith_dHij_minus_muHij_times_hvStarStates -
1626  ith_muHij_times_hvStates);
1627  }
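  // With LUMPED_MASS_MATRIX == 1 the update above is an explicit nodal formula,
  //   U_i^{n+1} = U_i^n - (dt/m_i) * (hyperbolic flux + source - dissipation);
  // otherwise only dt*(...) is accumulated and the consistent-mass solve is
  // expected to happen outside this kernel (inferred from the structure above).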
1628  }
1629  // ********** END OF LOOP IN DOFs ********** //
1630  } // end SECOND_CALL_CALCULATE_RESIDUAL
1631 
1632  // ********** COMPUTE NORMALS ********** //
1633  if (COMPUTE_NORMALS == 1) {
 1634  // Identify the boundary normals and assemble the nodal normal
 1635  // components
1636  for (int ebNE = 0; ebNE < nExteriorElementBoundaries_global; ebNE++) {
1637  register int
1638  ebN = exteriorElementBoundariesArray[ebNE],
1639  eN = elementBoundaryElementsArray[ebN * 2 + 0],
1640  ebN_local = elementBoundaryLocalElementBoundariesArray[ebN * 2 + 0];
1641  register double normal[3];
 1642  { // "Loop" over quadrature points
 1643  int kb = 0; // NOTE: a single quadrature point suffices since the
 1644  // element is not curved, so the normal is constant
 1645  // per element
1646  register int ebN_local_kb =
1647  ebN_local * nQuadraturePoints_elementBoundary + kb;
1648  register double jac_ext[nSpace * nSpace], jacDet_ext,
1649  jacInv_ext[nSpace * nSpace], boundaryJac[nSpace * (nSpace - 1)],
1650  metricTensor[(nSpace - 1) * (nSpace - 1)], metricTensorDetSqrt,
1651  x_ext, y_ext;
1652  /* compute information about mapping from reference element to
1653  * physical element */
1654  ck.calculateMapping_elementBoundary(
1655  eN, ebN_local, kb, ebN_local_kb, mesh_dof.data(), mesh_l2g.data(),
1656  mesh_trial_trace_ref.data(), mesh_grad_trial_trace_ref.data(),
1657  boundaryJac_ref.data(), jac_ext, jacDet_ext, jacInv_ext,
1658  boundaryJac, metricTensor, metricTensorDetSqrt, normal_ref.data(),
1659  normal, x_ext, y_ext);
1660  }
1661  // distribute the normal vectors
1662  for (int i = 0; i < nDOF_test_element; i++) {
1663  int eN_i = eN * nDOF_test_element + i;
1664  int gi = h_l2g[eN_i];
1665  normalx[gi] += 0.5 * normal[0] * (i == ebN_local ? 0. : 1.);
1666  normaly[gi] += 0.5 * normal[1] * (i == ebN_local ? 0. : 1.);
1667  }
1668  }
1669  // normalize
1670  for (int gi = 0; gi < numDOFsPerEqn; gi++) {
1671  double norm_factor =
1672  sqrt(std::pow(normalx[gi], 2) + std::pow(normaly[gi], 2));
1673  if (norm_factor != 0) {
1674  normalx[gi] /= norm_factor;
1675  normaly[gi] /= norm_factor;
1676  }
1677  }
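  // Each boundary DOF now carries the unit-normalized sum of its adjacent
  // boundary-face normals; DOFs whose accumulated normal is zero (interior
  // DOFs) stay at 0.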
1678  }
1679  // ********** END OF COMPUTING NORMALS ********** //
1680  } // end calculateResidual
1681 
 1682  void calculateMassMatrix(arguments_dict &args) {
 1683  xt::pyarray<double> &mesh_trial_ref = args.array<double>("mesh_trial_ref");
1684  xt::pyarray<double> &mesh_grad_trial_ref =
1685  args.array<double>("mesh_grad_trial_ref");
1686  xt::pyarray<double> &mesh_dof = args.array<double>("mesh_dof");
1687  xt::pyarray<double> &mesh_velocity_dof =
1688  args.array<double>("mesh_velocity_dof");
1689  double MOVING_DOMAIN = args.scalar<double>("MOVING_DOMAIN");
1690  xt::pyarray<int> &mesh_l2g = args.array<int>("mesh_l2g");
1691  xt::pyarray<double> &dV_ref = args.array<double>("dV_ref");
1692  xt::pyarray<double> &h_trial_ref = args.array<double>("h_trial_ref");
1693  xt::pyarray<double> &h_grad_trial_ref =
1694  args.array<double>("h_grad_trial_ref");
1695  xt::pyarray<double> &h_test_ref = args.array<double>("h_test_ref");
1696  xt::pyarray<double> &h_grad_test_ref =
1697  args.array<double>("h_grad_test_ref");
1698  xt::pyarray<double> &vel_trial_ref = args.array<double>("vel_trial_ref");
1699  xt::pyarray<double> &vel_grad_trial_ref =
1700  args.array<double>("vel_grad_trial_ref");
1701  xt::pyarray<double> &vel_test_ref = args.array<double>("vel_test_ref");
1702  xt::pyarray<double> &vel_grad_test_ref =
1703  args.array<double>("vel_grad_test_ref");
1704  xt::pyarray<double> &mesh_trial_trace_ref =
1705  args.array<double>("mesh_trial_trace_ref");
1706  xt::pyarray<double> &mesh_grad_trial_trace_ref =
1707  args.array<double>("mesh_grad_trial_trace_ref");
1708  xt::pyarray<double> &dS_ref = args.array<double>("dS_ref");
1709  xt::pyarray<double> &h_trial_trace_ref =
1710  args.array<double>("h_trial_trace_ref");
1711  xt::pyarray<double> &h_grad_trial_trace_ref =
1712  args.array<double>("h_grad_trial_trace_ref");
1713  xt::pyarray<double> &h_test_trace_ref =
1714  args.array<double>("h_test_trace_ref");
1715  xt::pyarray<double> &h_grad_test_trace_ref =
1716  args.array<double>("h_grad_test_trace_ref");
1717  xt::pyarray<double> &vel_trial_trace_ref =
1718  args.array<double>("vel_trial_trace_ref");
1719  xt::pyarray<double> &vel_grad_trial_trace_ref =
1720  args.array<double>("vel_grad_trial_trace_ref");
1721  xt::pyarray<double> &vel_test_trace_ref =
1722  args.array<double>("vel_test_trace_ref");
1723  xt::pyarray<double> &vel_grad_test_trace_ref =
1724  args.array<double>("vel_grad_test_trace_ref");
1725  xt::pyarray<double> &normal_ref = args.array<double>("normal_ref");
1726  xt::pyarray<double> &boundaryJac_ref =
1727  args.array<double>("boundaryJac_ref");
1728  xt::pyarray<double> &elementDiameter =
1729  args.array<double>("elementDiameter");
1730  int nElements_global = args.scalar<int>("nElements_global");
1731  double g = args.scalar<double>("g");
1732  xt::pyarray<int> &h_l2g = args.array<int>("h_l2g");
1733  xt::pyarray<int> &vel_l2g = args.array<int>("vel_l2g");
1734  xt::pyarray<double> &b_dof = args.array<double>("b_dof");
1735  xt::pyarray<double> &h_dof = args.array<double>("h_dof");
1736  xt::pyarray<double> &hu_dof = args.array<double>("hu_dof");
1737  xt::pyarray<double> &hv_dof = args.array<double>("hv_dof");
1738  xt::pyarray<double> &q_cfl = args.array<double>("q_cfl");
1739  xt::pyarray<int> &sdInfo_hu_hu_rowptr =
1740  args.array<int>("sdInfo_hu_hu_rowptr");
1741  xt::pyarray<int> &sdInfo_hu_hu_colind =
1742  args.array<int>("sdInfo_hu_hu_colind");
1743  xt::pyarray<int> &sdInfo_hu_hv_rowptr =
1744  args.array<int>("sdInfo_hu_hv_rowptr");
1745  xt::pyarray<int> &sdInfo_hu_hv_colind =
1746  args.array<int>("sdInfo_hu_hv_colind");
1747  xt::pyarray<int> &sdInfo_hv_hv_rowptr =
1748  args.array<int>("sdInfo_hv_hv_rowptr");
1749  xt::pyarray<int> &sdInfo_hv_hv_colind =
1750  args.array<int>("sdInfo_hv_hv_colind");
1751  xt::pyarray<int> &sdInfo_hv_hu_rowptr =
1752  args.array<int>("sdInfo_hv_hu_rowptr");
1753  xt::pyarray<int> &sdInfo_hv_hu_colind =
1754  args.array<int>("sdInfo_hv_hu_colind");
1755  xt::pyarray<int> &csrRowIndeces_h_h = args.array<int>("csrRowIndeces_h_h");
1756  xt::pyarray<int> &csrColumnOffsets_h_h =
1757  args.array<int>("csrColumnOffsets_h_h");
1758  xt::pyarray<int> &csrRowIndeces_h_hu =
1759  args.array<int>("csrRowIndeces_h_hu");
1760  xt::pyarray<int> &csrColumnOffsets_h_hu =
1761  args.array<int>("csrColumnOffsets_h_hu");
1762  xt::pyarray<int> &csrRowIndeces_h_hv =
1763  args.array<int>("csrRowIndeces_h_hv");
1764  xt::pyarray<int> &csrColumnOffsets_h_hv =
1765  args.array<int>("csrColumnOffsets_h_hv");
1766  xt::pyarray<int> &csrRowIndeces_hu_h =
1767  args.array<int>("csrRowIndeces_hu_h");
1768  xt::pyarray<int> &csrColumnOffsets_hu_h =
1769  args.array<int>("csrColumnOffsets_hu_h");
1770  xt::pyarray<int> &csrRowIndeces_hu_hu =
1771  args.array<int>("csrRowIndeces_hu_hu");
1772  xt::pyarray<int> &csrColumnOffsets_hu_hu =
1773  args.array<int>("csrColumnOffsets_hu_hu");
1774  xt::pyarray<int> &csrRowIndeces_hu_hv =
1775  args.array<int>("csrRowIndeces_hu_hv");
1776  xt::pyarray<int> &csrColumnOffsets_hu_hv =
1777  args.array<int>("csrColumnOffsets_hu_hv");
1778  xt::pyarray<int> &csrRowIndeces_hv_h =
1779  args.array<int>("csrRowIndeces_hv_h");
1780  xt::pyarray<int> &csrColumnOffsets_hv_h =
1781  args.array<int>("csrColumnOffsets_hv_h");
1782  xt::pyarray<int> &csrRowIndeces_hv_hu =
1783  args.array<int>("csrRowIndeces_hv_hu");
1784  xt::pyarray<int> &csrColumnOffsets_hv_hu =
1785  args.array<int>("csrColumnOffsets_hv_hu");
1786  xt::pyarray<int> &csrRowIndeces_hv_hv =
1787  args.array<int>("csrRowIndeces_hv_hv");
1788  xt::pyarray<int> &csrColumnOffsets_hv_hv =
1789  args.array<int>("csrColumnOffsets_hv_hv");
1790  xt::pyarray<double> &globalJacobian = args.array<double>("globalJacobian");
1791  int nExteriorElementBoundaries_global =
1792  args.scalar<int>("nExteriorElementBoundaries_global");
1793  xt::pyarray<int> &exteriorElementBoundariesArray =
1794  args.array<int>("exteriorElementBoundariesArray");
1795  xt::pyarray<int> &elementBoundaryElementsArray =
1796  args.array<int>("elementBoundaryElementsArray");
1797  xt::pyarray<int> &elementBoundaryLocalElementBoundariesArray =
1798  args.array<int>("elementBoundaryLocalElementBoundariesArray");
1799  xt::pyarray<int> &isDOFBoundary_h = args.array<int>("isDOFBoundary_h");
1800  xt::pyarray<int> &isDOFBoundary_hu = args.array<int>("isDOFBoundary_hu");
1801  xt::pyarray<int> &isDOFBoundary_hv = args.array<int>("isDOFBoundary_hv");
1802  xt::pyarray<int> &isAdvectiveFluxBoundary_h =
1803  args.array<int>("isAdvectiveFluxBoundary_h");
1804  xt::pyarray<int> &isAdvectiveFluxBoundary_hu =
1805  args.array<int>("isAdvectiveFluxBoundary_hu");
1806  xt::pyarray<int> &isAdvectiveFluxBoundary_hv =
1807  args.array<int>("isAdvectiveFluxBoundary_hv");
1808  xt::pyarray<int> &isDiffusiveFluxBoundary_hu =
1809  args.array<int>("isDiffusiveFluxBoundary_hu");
1810  xt::pyarray<int> &isDiffusiveFluxBoundary_hv =
1811  args.array<int>("isDiffusiveFluxBoundary_hv");
1812  xt::pyarray<double> &ebqe_bc_h_ext = args.array<double>("ebqe_bc_h_ext");
1813  xt::pyarray<double> &ebqe_bc_flux_mass_ext =
1814  args.array<double>("ebqe_bc_flux_mass_ext");
1815  xt::pyarray<double> &ebqe_bc_flux_mom_hu_adv_ext =
1816  args.array<double>("ebqe_bc_flux_mom_hu_adv_ext");
1817  xt::pyarray<double> &ebqe_bc_flux_mom_hv_adv_ext =
1818  args.array<double>("ebqe_bc_flux_mom_hv_adv_ext");
1819  xt::pyarray<double> &ebqe_bc_hu_ext = args.array<double>("ebqe_bc_hu_ext");
1820  xt::pyarray<double> &ebqe_bc_flux_hu_diff_ext =
1821  args.array<double>("ebqe_bc_flux_hu_diff_ext");
1822  xt::pyarray<double> &ebqe_penalty_ext =
1823  args.array<double>("ebqe_penalty_ext");
1824  xt::pyarray<double> &ebqe_bc_hv_ext = args.array<double>("ebqe_bc_hv_ext");
1825  xt::pyarray<double> &ebqe_bc_flux_hv_diff_ext =
1826  args.array<double>("ebqe_bc_flux_hv_diff_ext");
1827  xt::pyarray<int> &csrColumnOffsets_eb_h_h =
1828  args.array<int>("csrColumnOffsets_eb_h_h");
1829  xt::pyarray<int> &csrColumnOffsets_eb_h_hu =
1830  args.array<int>("csrColumnOffsets_eb_h_hu");
1831  xt::pyarray<int> &csrColumnOffsets_eb_h_hv =
1832  args.array<int>("csrColumnOffsets_eb_h_hv");
1833  xt::pyarray<int> &csrColumnOffsets_eb_hu_h =
1834  args.array<int>("csrColumnOffsets_eb_hu_h");
1835  xt::pyarray<int> &csrColumnOffsets_eb_hu_hu =
1836  args.array<int>("csrColumnOffsets_eb_hu_hu");
1837  xt::pyarray<int> &csrColumnOffsets_eb_hu_hv =
1838  args.array<int>("csrColumnOffsets_eb_hu_hv");
1839  xt::pyarray<int> &csrColumnOffsets_eb_hv_h =
1840  args.array<int>("csrColumnOffsets_eb_hv_h");
1841  xt::pyarray<int> &csrColumnOffsets_eb_hv_hu =
1842  args.array<int>("csrColumnOffsets_eb_hv_hu");
1843  xt::pyarray<int> &csrColumnOffsets_eb_hv_hv =
1844  args.array<int>("csrColumnOffsets_eb_hv_hv");
1845  double dt = args.scalar<double>("dt");
1846  //
1847  // loop over elements to compute volume integrals and load them into the
1848  // element Jacobians and global Jacobian
1849  //
1850  for (int eN = 0; eN < nElements_global; eN++) {
1851  register double elementJacobian_h_h[nDOF_test_element]
1852  [nDOF_trial_element],
1853  elementJacobian_hu_hu[nDOF_test_element][nDOF_trial_element],
1854  elementJacobian_hv_hv[nDOF_test_element][nDOF_trial_element];
1855  for (int i = 0; i < nDOF_test_element; i++)
1856  for (int j = 0; j < nDOF_trial_element; j++) {
1857  elementJacobian_h_h[i][j] = 0.0;
1858  elementJacobian_hu_hu[i][j] = 0.0;
1859  elementJacobian_hv_hv[i][j] = 0.0;
1860  }
1861  for (int k = 0; k < nQuadraturePoints_element; k++) {
1862  int eN_k = eN * nQuadraturePoints_element +
1863  k, // index to a scalar at a quadrature point
1864  eN_k_nSpace = eN_k * nSpace,
1865  eN_nDOF_trial_element =
1866  eN *
1867  nDOF_trial_element; // index to a vector at a quadrature point
1868 
1869  // declare local storage
1870  register double jac[nSpace * nSpace], jacDet, jacInv[nSpace * nSpace],
1871  dV, h_test_dV[nDOF_test_element], vel_test_dV[nDOF_test_element], x,
1872  y, xt, yt;
1873  // get jacobian, etc for mapping reference element
1874  ck.calculateMapping_element(
1875  eN, k, mesh_dof.data(), mesh_l2g.data(), mesh_trial_ref.data(),
1876  mesh_grad_trial_ref.data(), jac, jacDet, jacInv, x, y);
1877  // get the physical integration weight
1878  dV = fabs(jacDet) * dV_ref[k];
1879  // precalculate test function products with integration weights
1880  for (int j = 0; j < nDOF_trial_element; j++) {
1881  h_test_dV[j] = h_test_ref[k * nDOF_trial_element + j] * dV;
1882  vel_test_dV[j] = vel_test_ref[k * nDOF_trial_element + j] * dV;
1883  }
1884  for (int i = 0; i < nDOF_test_element; i++) {
1885  register int i_nSpace = i * nSpace;
1886  for (int j = 0; j < nDOF_trial_element; j++) {
1887  register int j_nSpace = j * nSpace;
1888  elementJacobian_h_h[i][j] +=
1889  h_trial_ref[k * nDOF_trial_element + j] * h_test_dV[i];
1890  elementJacobian_hu_hu[i][j] +=
1891  vel_trial_ref[k * nDOF_trial_element + j] * vel_test_dV[i];
1892  elementJacobian_hv_hv[i][j] +=
1893  vel_trial_ref[k * nDOF_trial_element + j] * vel_test_dV[i];
1894  } // j
1895  } // i
1896  } // k
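  // The accumulation above assembles the consistent mass matrix entries
  // integral(phi_j * phi_i) dV for the three diagonal blocks (h-h, hu-hu,
  // hv-hv); the coupling blocks are not assembled by this kernel.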
1897  //
 1898  // load the element Jacobian into the global Jacobian
1899  //
1900  for (int i = 0; i < nDOF_test_element; i++) {
1901  register int eN_i = eN * nDOF_test_element + i;
1902  for (int j = 0; j < nDOF_trial_element; j++) {
1903  register int eN_i_j = eN_i * nDOF_trial_element + j;
1904  globalJacobian[csrRowIndeces_h_h[eN_i] +
1905  csrColumnOffsets_h_h[eN_i_j]] +=
1906  elementJacobian_h_h[i][j];
1907  globalJacobian[csrRowIndeces_hu_hu[eN_i] +
1908  csrColumnOffsets_hu_hu[eN_i_j]] +=
1909  elementJacobian_hu_hu[i][j];
1910  globalJacobian[csrRowIndeces_hv_hv[eN_i] +
1911  csrColumnOffsets_hv_hv[eN_i_j]] +=
1912  elementJacobian_hv_hv[i][j];
1913  } // j
1914  } // i
1915  } // elements
1916  }
1917 
 1918  void calculateLumpedMassMatrix(arguments_dict &args) {
 1919  xt::pyarray<double> &mesh_trial_ref = args.array<double>("mesh_trial_ref");
1920  xt::pyarray<double> &mesh_grad_trial_ref =
1921  args.array<double>("mesh_grad_trial_ref");
1922  xt::pyarray<double> &mesh_dof = args.array<double>("mesh_dof");
1923  xt::pyarray<double> &mesh_velocity_dof =
1924  args.array<double>("mesh_velocity_dof");
1925  double MOVING_DOMAIN = args.scalar<double>("MOVING_DOMAIN");
1926  xt::pyarray<int> &mesh_l2g = args.array<int>("mesh_l2g");
1927  xt::pyarray<double> &dV_ref = args.array<double>("dV_ref");
1928  xt::pyarray<double> &h_trial_ref = args.array<double>("h_trial_ref");
1929  xt::pyarray<double> &h_grad_trial_ref =
1930  args.array<double>("h_grad_trial_ref");
1931  xt::pyarray<double> &h_test_ref = args.array<double>("h_test_ref");
1932  xt::pyarray<double> &h_grad_test_ref =
1933  args.array<double>("h_grad_test_ref");
1934  xt::pyarray<double> &vel_trial_ref = args.array<double>("vel_trial_ref");
1935  xt::pyarray<double> &vel_grad_trial_ref =
1936  args.array<double>("vel_grad_trial_ref");
1937  xt::pyarray<double> &vel_test_ref = args.array<double>("vel_test_ref");
1938  xt::pyarray<double> &vel_grad_test_ref =
1939  args.array<double>("vel_grad_test_ref");
1940  xt::pyarray<double> &mesh_trial_trace_ref =
1941  args.array<double>("mesh_trial_trace_ref");
1942  xt::pyarray<double> &mesh_grad_trial_trace_ref =
1943  args.array<double>("mesh_grad_trial_trace_ref");
1944  xt::pyarray<double> &dS_ref = args.array<double>("dS_ref");
1945  xt::pyarray<double> &h_trial_trace_ref =
1946  args.array<double>("h_trial_trace_ref");
1947  xt::pyarray<double> &h_grad_trial_trace_ref =
1948  args.array<double>("h_grad_trial_trace_ref");
1949  xt::pyarray<double> &h_test_trace_ref =
1950  args.array<double>("h_test_trace_ref");
1951  xt::pyarray<double> &h_grad_test_trace_ref =
1952  args.array<double>("h_grad_test_trace_ref");
1953  xt::pyarray<double> &vel_trial_trace_ref =
1954  args.array<double>("vel_trial_trace_ref");
1955  xt::pyarray<double> &vel_grad_trial_trace_ref =
1956  args.array<double>("vel_grad_trial_trace_ref");
1957  xt::pyarray<double> &vel_test_trace_ref =
1958  args.array<double>("vel_test_trace_ref");
1959  xt::pyarray<double> &vel_grad_test_trace_ref =
1960  args.array<double>("vel_grad_test_trace_ref");
1961  xt::pyarray<double> &normal_ref = args.array<double>("normal_ref");
1962  xt::pyarray<double> &boundaryJac_ref =
1963  args.array<double>("boundaryJac_ref");
1964  xt::pyarray<double> &elementDiameter =
1965  args.array<double>("elementDiameter");
1966  int nElements_global = args.scalar<int>("nElements_global");
1967  double g = args.scalar<double>("g");
1968  xt::pyarray<int> &h_l2g = args.array<int>("h_l2g");
1969  xt::pyarray<int> &vel_l2g = args.array<int>("vel_l2g");
1970  xt::pyarray<double> &b_dof = args.array<double>("b_dof");
1971  xt::pyarray<double> &h_dof = args.array<double>("h_dof");
1972  xt::pyarray<double> &hu_dof = args.array<double>("hu_dof");
1973  xt::pyarray<double> &hv_dof = args.array<double>("hv_dof");
1974  xt::pyarray<double> &q_cfl = args.array<double>("q_cfl");
1975  xt::pyarray<int> &sdInfo_hu_hu_rowptr =
1976  args.array<int>("sdInfo_hu_hu_rowptr");
1977  xt::pyarray<int> &sdInfo_hu_hu_colind =
1978  args.array<int>("sdInfo_hu_hu_colind");
1979  xt::pyarray<int> &sdInfo_hu_hv_rowptr =
1980  args.array<int>("sdInfo_hu_hv_rowptr");
1981  xt::pyarray<int> &sdInfo_hu_hv_colind =
1982  args.array<int>("sdInfo_hu_hv_colind");
1983  xt::pyarray<int> &sdInfo_hv_hv_rowptr =
1984  args.array<int>("sdInfo_hv_hv_rowptr");
1985  xt::pyarray<int> &sdInfo_hv_hv_colind =
1986  args.array<int>("sdInfo_hv_hv_colind");
1987  xt::pyarray<int> &sdInfo_hv_hu_rowptr =
1988  args.array<int>("sdInfo_hv_hu_rowptr");
1989  xt::pyarray<int> &sdInfo_hv_hu_colind =
1990  args.array<int>("sdInfo_hv_hu_colind");
1991  xt::pyarray<int> &csrRowIndeces_h_h = args.array<int>("csrRowIndeces_h_h");
1992  xt::pyarray<int> &csrColumnOffsets_h_h =
1993  args.array<int>("csrColumnOffsets_h_h");
1994  xt::pyarray<int> &csrRowIndeces_h_hu =
1995  args.array<int>("csrRowIndeces_h_hu");
1996  xt::pyarray<int> &csrColumnOffsets_h_hu =
1997  args.array<int>("csrColumnOffsets_h_hu");
1998  xt::pyarray<int> &csrRowIndeces_h_hv =
1999  args.array<int>("csrRowIndeces_h_hv");
2000  xt::pyarray<int> &csrColumnOffsets_h_hv =
2001  args.array<int>("csrColumnOffsets_h_hv");
2002  xt::pyarray<int> &csrRowIndeces_hu_h =
2003  args.array<int>("csrRowIndeces_hu_h");
2004  xt::pyarray<int> &csrColumnOffsets_hu_h =
2005  args.array<int>("csrColumnOffsets_hu_h");
2006  xt::pyarray<int> &csrRowIndeces_hu_hu =
2007  args.array<int>("csrRowIndeces_hu_hu");
2008  xt::pyarray<int> &csrColumnOffsets_hu_hu =
2009  args.array<int>("csrColumnOffsets_hu_hu");
2010  xt::pyarray<int> &csrRowIndeces_hu_hv =
2011  args.array<int>("csrRowIndeces_hu_hv");
2012  xt::pyarray<int> &csrColumnOffsets_hu_hv =
2013  args.array<int>("csrColumnOffsets_hu_hv");
2014  xt::pyarray<int> &csrRowIndeces_hv_h =
2015  args.array<int>("csrRowIndeces_hv_h");
2016  xt::pyarray<int> &csrColumnOffsets_hv_h =
2017  args.array<int>("csrColumnOffsets_hv_h");
2018  xt::pyarray<int> &csrRowIndeces_hv_hu =
2019  args.array<int>("csrRowIndeces_hv_hu");
2020  xt::pyarray<int> &csrColumnOffsets_hv_hu =
2021  args.array<int>("csrColumnOffsets_hv_hu");
2022  xt::pyarray<int> &csrRowIndeces_hv_hv =
2023  args.array<int>("csrRowIndeces_hv_hv");
2024  xt::pyarray<int> &csrColumnOffsets_hv_hv =
2025  args.array<int>("csrColumnOffsets_hv_hv");
2026  xt::pyarray<double> &globalJacobian = args.array<double>("globalJacobian");
2027  int nExteriorElementBoundaries_global =
2028  args.scalar<int>("nExteriorElementBoundaries_global");
2029  xt::pyarray<int> &exteriorElementBoundariesArray =
2030  args.array<int>("exteriorElementBoundariesArray");
2031  xt::pyarray<int> &elementBoundaryElementsArray =
2032  args.array<int>("elementBoundaryElementsArray");
2033  xt::pyarray<int> &elementBoundaryLocalElementBoundariesArray =
2034  args.array<int>("elementBoundaryLocalElementBoundariesArray");
2035  xt::pyarray<int> &isDOFBoundary_h = args.array<int>("isDOFBoundary_h");
2036  xt::pyarray<int> &isDOFBoundary_hu = args.array<int>("isDOFBoundary_hu");
2037  xt::pyarray<int> &isDOFBoundary_hv = args.array<int>("isDOFBoundary_hv");
2038  xt::pyarray<int> &isAdvectiveFluxBoundary_h =
2039  args.array<int>("isAdvectiveFluxBoundary_h");
2040  xt::pyarray<int> &isAdvectiveFluxBoundary_hu =
2041  args.array<int>("isAdvectiveFluxBoundary_hu");
2042  xt::pyarray<int> &isAdvectiveFluxBoundary_hv =
2043  args.array<int>("isAdvectiveFluxBoundary_hv");
2044  xt::pyarray<int> &isDiffusiveFluxBoundary_hu =
2045  args.array<int>("isDiffusiveFluxBoundary_hu");
2046  xt::pyarray<int> &isDiffusiveFluxBoundary_hv =
2047  args.array<int>("isDiffusiveFluxBoundary_hv");
2048  xt::pyarray<double> &ebqe_bc_h_ext = args.array<double>("ebqe_bc_h_ext");
2049  xt::pyarray<double> &ebqe_bc_flux_mass_ext =
2050  args.array<double>("ebqe_bc_flux_mass_ext");
2051  xt::pyarray<double> &ebqe_bc_flux_mom_hu_adv_ext =
2052  args.array<double>("ebqe_bc_flux_mom_hu_adv_ext");
2053  xt::pyarray<double> &ebqe_bc_flux_mom_hv_adv_ext =
2054  args.array<double>("ebqe_bc_flux_mom_hv_adv_ext");
2055  xt::pyarray<double> &ebqe_bc_hu_ext = args.array<double>("ebqe_bc_hu_ext");
2056  xt::pyarray<double> &ebqe_bc_flux_hu_diff_ext =
2057  args.array<double>("ebqe_bc_flux_hu_diff_ext");
2058  xt::pyarray<double> &ebqe_penalty_ext =
2059  args.array<double>("ebqe_penalty_ext");
2060  xt::pyarray<double> &ebqe_bc_hv_ext = args.array<double>("ebqe_bc_hv_ext");
2061  xt::pyarray<double> &ebqe_bc_flux_hv_diff_ext =
2062  args.array<double>("ebqe_bc_flux_hv_diff_ext");
2063  xt::pyarray<int> &csrColumnOffsets_eb_h_h =
2064  args.array<int>("csrColumnOffsets_eb_h_h");
2065  xt::pyarray<int> &csrColumnOffsets_eb_h_hu =
2066  args.array<int>("csrColumnOffsets_eb_h_hu");
2067  xt::pyarray<int> &csrColumnOffsets_eb_h_hv =
2068  args.array<int>("csrColumnOffsets_eb_h_hv");
2069  xt::pyarray<int> &csrColumnOffsets_eb_hu_h =
2070  args.array<int>("csrColumnOffsets_eb_hu_h");
2071  xt::pyarray<int> &csrColumnOffsets_eb_hu_hu =
2072  args.array<int>("csrColumnOffsets_eb_hu_hu");
2073  xt::pyarray<int> &csrColumnOffsets_eb_hu_hv =
2074  args.array<int>("csrColumnOffsets_eb_hu_hv");
2075  xt::pyarray<int> &csrColumnOffsets_eb_hv_h =
2076  args.array<int>("csrColumnOffsets_eb_hv_h");
2077  xt::pyarray<int> &csrColumnOffsets_eb_hv_hu =
2078  args.array<int>("csrColumnOffsets_eb_hv_hu");
2079  xt::pyarray<int> &csrColumnOffsets_eb_hv_hv =
2080  args.array<int>("csrColumnOffsets_eb_hv_hv");
2081  double dt = args.scalar<double>("dt");
2082  //
2083  // loop over elements to compute volume integrals and load them into the
2084  // element Jacobians and global Jacobian
2085  //
2086  for (int eN = 0; eN < nElements_global; eN++) {
2087  register double elementJacobian_h_h[nDOF_test_element]
2088  [nDOF_trial_element],
2089  elementJacobian_hu_hu[nDOF_test_element][nDOF_trial_element],
2090  elementJacobian_hv_hv[nDOF_test_element][nDOF_trial_element];
2091  for (int i = 0; i < nDOF_test_element; i++)
2092  for (int j = 0; j < nDOF_trial_element; j++) {
2093  elementJacobian_h_h[i][j] = 0.0;
2094  elementJacobian_hu_hu[i][j] = 0.0;
2095  elementJacobian_hv_hv[i][j] = 0.0;
2096  }
2097  for (int k = 0; k < nQuadraturePoints_element; k++) {
2098  int eN_k = eN * nQuadraturePoints_element +
2099  k, // index to a scalar at a quadrature point
2100  eN_k_nSpace = eN_k * nSpace,
2101  eN_nDOF_trial_element =
2102  eN *
2103  nDOF_trial_element; // index to a vector at a quadrature point
2104 
2105  // declare local storage
2106  register double jac[nSpace * nSpace], jacDet, jacInv[nSpace * nSpace],
2107  dV, h_test_dV[nDOF_test_element], vel_test_dV[nDOF_test_element], x,
2108  y, xt, yt;
2109  // get jacobian, etc for mapping reference element
2110  ck.calculateMapping_element(
2111  eN, k, mesh_dof.data(), mesh_l2g.data(), mesh_trial_ref.data(),
2112  mesh_grad_trial_ref.data(), jac, jacDet, jacInv, x, y);
2113  // get the physical integration weight
2114  dV = fabs(jacDet) * dV_ref[k];
2115  // precalculate test function products with integration weights
2116  for (int j = 0; j < nDOF_trial_element; j++) {
2117  h_test_dV[j] = h_test_ref[k * nDOF_trial_element + j] * dV;
2118  vel_test_dV[j] = vel_test_ref[k * nDOF_trial_element + j] * dV;
2119  }
2120 
2121  for (int i = 0; i < nDOF_test_element; i++) {
2122  register int i_nSpace = i * nSpace;
2123  for (int j = 0; j < nDOF_trial_element; j++) {
2124  register int j_nSpace = j * nSpace;
2125  elementJacobian_h_h[i][j] += (i == j ? 1.0 : 0.0) * h_test_dV[i];
2126  elementJacobian_hu_hu[i][j] +=
2127  (i == j ? 1.0 : 0.0) * vel_test_dV[i];
2128  elementJacobian_hv_hv[i][j] +=
2129  (i == j ? 1.0 : 0.0) * vel_test_dV[i];
2130  } // j
2131  } // i
2132  } // k
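  // With the (i == j) mask above each element block reduces to its diagonal,
  // integral(phi_i) dV, which for a partition-of-unity basis equals the row sum
  // of the consistent mass matrix, i.e. the lumped mass matrix.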
2133  //
 2134  // load the element Jacobian into the global Jacobian
2135  //
2136  for (int i = 0; i < nDOF_test_element; i++) {
2137  register int eN_i = eN * nDOF_test_element + i;
2138  for (int j = 0; j < nDOF_trial_element; j++) {
2139  register int eN_i_j = eN_i * nDOF_trial_element + j;
2140  globalJacobian[csrRowIndeces_h_h[eN_i] +
2141  csrColumnOffsets_h_h[eN_i_j]] +=
2142  elementJacobian_h_h[i][j];
2143  globalJacobian[csrRowIndeces_hu_hu[eN_i] +
2144  csrColumnOffsets_hu_hu[eN_i_j]] +=
2145  elementJacobian_hu_hu[i][j];
2146  globalJacobian[csrRowIndeces_hv_hv[eN_i] +
2147  csrColumnOffsets_hv_hv[eN_i_j]] +=
2148  elementJacobian_hv_hv[i][j];
2149  } // j
2150  } // i
2151  } // elements
2152  }
 2153 }; // end class SW2DCV
2154 
2155 inline SW2DCV_base *newSW2DCV(int nSpaceIn, int nQuadraturePoints_elementIn,
2156  int nDOF_mesh_trial_elementIn,
2157  int nDOF_trial_elementIn, int nDOF_test_elementIn,
2158  int nQuadraturePoints_elementBoundaryIn,
2159  int CompKernelFlag) {
 2160  return proteus::chooseAndAllocateDiscretization2D<SW2DCV_base, SW2DCV,
 2161  CompKernel>(
2162  nSpaceIn, nQuadraturePoints_elementIn, nDOF_mesh_trial_elementIn,
2163  nDOF_trial_elementIn, nDOF_test_elementIn,
2164  nQuadraturePoints_elementBoundaryIn, CompKernelFlag);
2165 }
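 // Hypothetical usage sketch (not part of this header): the factory is normally
 // driven from the generated Python bindings, but called directly it would look
 // roughly like this, with all argument values below being illustrative only:
 //
 //   proteus::SW2DCV_base *model =
 //       proteus::newSW2DCV(/*nSpaceIn*/ 2,
 //                          /*nQuadraturePoints_elementIn*/ 3,
 //                          /*nDOF_mesh_trial_elementIn*/ 3,
 //                          /*nDOF_trial_elementIn*/ 3,
 //                          /*nDOF_test_elementIn*/ 3,
 //                          /*nQuadraturePoints_elementBoundaryIn*/ 2,
 //                          /*CompKernelFlag*/ 0);
 //   // ... fill a proteus::arguments_dict `args` with the required arrays ...
 //   // model->calculateResidual(args);
 //   delete model;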
2166 } // end namespace proteus
2167 
2168 #endif