proteus 1.8.1 (C/C++/Fortran libraries)
GN_SW2DCV.h
1 #ifndef GN_SW2DCV_H
2 #define GN_SW2DCV_H
3 #include "ArgumentsDict.h"
4 #include "CompKernel.h"
5 #include "ModelFactory.h"
6 #include "xtensor-python/pyarray.hpp"
7 #include <assert.h>
8 #include <cmath>
9 #include <iostream>
10 #include <valarray>
11 
12 namespace py = pybind11;
13 
14 #define VEL_FIX_POWER 2.
15 #define LAMBDA_MGN 1.
16 #define IF_BOTH_GAMMA_BRANCHES 0
17 #define LIMITING_ITERATION 2
18 #define IF_DEBUGGING 0
19 #define IF_LIMITING_DEBUGGING 0
20 
21 /* inline functions */
22 namespace proteus {
23 
24 // for Serre-Green-Naghdi "max" wave speeds
25 inline double GN_nu1(const double &g, const double &hL, const double &uL,
26  const double &etaL, const double &inv_meshSizeL) {
27 
28  double augL = LAMBDA_MGN / 3. * inv_meshSizeL * (6. * hL + 12. * (hL - etaL));
29  const double meshSizeL = 1. / inv_meshSizeL;
30 
31  if (etaL >= hL) {
32  augL = LAMBDA_MGN / 3. * inv_meshSizeL * (6. * hL);
33  }
34  augL = augL * std::pow(meshSizeL / fmax(meshSizeL, hL), 2);
35 
36  return uL - sqrt(g * hL) * sqrt(1. + augL);
37 }
38 inline double GN_nu3(const double &g, const double &hR, const double &uR,
39  const double &etaR, const double &inv_meshSizeR) {
40  double augR = LAMBDA_MGN / 3. * inv_meshSizeR * (6. * hR + 12. * (hR - etaR));
41  const double meshSizeR = 1. / inv_meshSizeR;
42 
43  if (etaR >= hR) {
44  augR = LAMBDA_MGN / 3. * inv_meshSizeR * (6. * hR);
45  }
46  augR = augR * std::pow(meshSizeR / fmax(meshSizeR, hR), 2);
47  return uR + sqrt(g * hR) * sqrt(1 + augR);
48 }
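/* A compact restatement of what GN_nu1/GN_nu3 compute (a sketch in the code's
   own notation, cf. the eigenvalue comment in maxWaveSpeedSharpInitialGuess
   below):

     lambda_1 = uL - sqrt(g*hL) * sqrt(1 + augL)
     lambda_3 = uR + sqrt(g*hR) * sqrt(1 + augR)

   with, on each side,

     aug = (LAMBDA_MGN/3) * inv_meshSize * (6*h + 12*(h - eta))   if eta <  h
     aug = (LAMBDA_MGN/3) * inv_meshSize * (6*h)                  if eta >= h

   and aug subsequently scaled by (meshSize / max(meshSize, h))^2. */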
49 // FOR CELL BASED ENTROPY VISCOSITY
50 inline double ENTROPY(const double &g, const double &h, const double &hu,
51  const double &hv, const double &z,
52  const double &one_over_hReg) {
53  return 0.5 *
54  (g * h * h + one_over_hReg * (hu * hu + hv * hv) + 2. * g * h * z);
55 }
56 inline double DENTROPY_DH(const double &g, const double &h, const double &hu,
57  const double &hv, const double &z,
58  const double &one_over_hReg) {
59  return g * h - 0.5 * (hu * hu + hv * hv) * std::pow(one_over_hReg, 2) + g * z;
60 }
61 inline double DENTROPY_DHU(const double &g, const double &h, const double &hu,
62  const double &hv, const double &z,
63  const double &one_over_hReg) {
64  return hu * one_over_hReg;
65 }
66 inline double DENTROPY_DHV(const double &g, const double &h, const double &hu,
67  const double &hv, const double &z,
68  const double &one_over_hReg) {
69  return hv * one_over_hReg;
70 }
71 inline double ENTROPY_FLUX1(const double &g, const double &h, const double &hu,
72  const double &hv, const double &z,
73  const double &one_over_hReg) {
74  return (ENTROPY(g, h, hu, hv, z, one_over_hReg) + 0.5 * g * h * h +
75  g * h * z) *
76  hu * one_over_hReg;
77 }
78 inline double ENTROPY_FLUX2(const double &g, const double &h, const double &hu,
79  const double &hv, const double &z,
80  const double &one_over_hReg) {
81  return (ENTROPY(g, h, hu, hv, z, one_over_hReg) + 0.5 * g * h * h +
82  g * h * z) *
83  hv * one_over_hReg;
84 }
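/* For reference, the entropy pair implemented above reads (a sketch, writing
   q = (hu, hv) for the discharge and z for the bathymetry; one_over_hReg is a
   regularized 1/h):

     ENTROPY:          eta(h, q, z) = 0.5 * ( g*h^2 + |q|^2/h + 2*g*h*z )
     ENTROPY_FLUX1/2:  F(h, q, z)   = ( eta + 0.5*g*h^2 + g*h*z ) * q/h

   DENTROPY_DH, DENTROPY_DHU and DENTROPY_DHV are the corresponding partial
   derivatives of eta with respect to h, hu and hv. */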
85 inline double relaxation(const double &xi, const double &alpha) {
86  double function = 0.;
87  if (xi < 1.) {
88  const double den = 1.0 - alpha;
89  const double num = std::exp(-std::abs(std::log(alpha)) * xi * xi) - alpha;
90  function = num / den;
91  }
92  return function;
93 }
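/* relaxation(xi, alpha) is a smooth blending profile: it returns
   (exp(-|log(alpha)| * xi^2) - alpha) / (1 - alpha) for xi < 1 and 0
   otherwise, so it equals 1 at xi = 0 and vanishes at the end of a zone.
   A minimal, hypothetical usage sketch (alpha = 0.005 matches the constant
   defined in calculateResidual below):

     const double alpha = 0.005;
     double w0 = proteus::relaxation(0.0, alpha); // == 1: full strength at the zone start
     double w1 = proteus::relaxation(0.5, alpha); // in (0, 1): partial blending
     double w2 = proteus::relaxation(1.5, alpha); // == 0: outside the relaxation zone
*/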
94 } // namespace proteus
95 
96 namespace proteus {
97 
99 public:
100  virtual ~GN_SW2DCV_base() {}
101  virtual void convexLimiting(arguments_dict &args) = 0;
102  virtual double calculateEdgeBasedCFL(arguments_dict &args) = 0;
103  virtual void calculatePreStep(arguments_dict &args) = 0;
104  virtual void calculateEV(arguments_dict &args) = 0;
106  virtual void calculateResidual(arguments_dict &args) = 0;
107  virtual void calculateMassMatrix(arguments_dict &args) = 0;
108  virtual void calculateLumpedMassMatrix(arguments_dict &args) = 0;
109 };
110 
111 template <class CompKernelType, int nSpace, int nQuadraturePoints_element,
112  int nDOF_mesh_trial_element, int nDOF_trial_element,
113  int nDOF_test_element, int nQuadraturePoints_elementBoundary>
114 class GN_SW2DCV : public GN_SW2DCV_base {
115 public:
116  const int nDOF_test_X_trial_element;
117  CompKernelType ck;
118  GN_SW2DCV()
119  : nDOF_test_X_trial_element(nDOF_test_element * nDOF_trial_element),
120  ck() {
121  std::cout << "Constructing GN_SW2DCV<CompKernelTemplate<" << nSpace << ","
122  << nQuadraturePoints_element << "," << nDOF_mesh_trial_element
123  << "," << nDOF_trial_element << "," << nDOF_test_element << ","
124  << nQuadraturePoints_elementBoundary << ">());" << std::endl
125  << std::flush;
126  }
127 
128  inline double maxWaveSpeedSharpInitialGuess(
129  const double g, const double nx, const double ny, const double hL,
130  const double huL, const double hvL, const double hetaL, const double inv_MeshL,
131  const double hR, const double huR, const double hvR, const double hetaR,
132  const double inv_MeshR, const double hEps) {
133  const double one_over_hL =
134  2.0 * hL / (hL * hL + std::pow(fmax(hL, hEps), 2.0));
135  const double one_over_hR =
136  2.0 * hR / (hR * hR + std::pow(fmax(hR, hEps), 2.0));
137  const double hVelL = nx * huL + ny * hvL;
138  const double hVelR = nx * huR + ny * hvR;
139  const double velL = one_over_hL * hVelL;
140  const double velR = one_over_hR * hVelR;
141  const double etaL = one_over_hL * hetaL;
142  const double etaR = one_over_hR * hetaR;
143 
144  /* See equation 4.12 JCP 2019 paper:
145  1-eigenvalue: uL-sqrt(g*hL)*sqrt(1 + augL)
146  3-eigenvalue: uR+sqrt(g*hR)*sqrt(1 + augR)
147  */
148  const double lambda1 = GN_nu1(g, hL, velL, etaL, inv_MeshL);
149  const double lambda3 = GN_nu3(g, hR, velR, etaR, inv_MeshR);
150 
151  return fmax(fabs(lambda1), fabs(lambda3));
152  }
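/* The expression 2*h / (h*h + pow(max(h, hEps), 2)), used for one_over_hL and
   one_over_hR above and throughout this file, is a regularized 1/h: it equals
   exactly 1/h whenever h >= hEps and tends to 0 as h -> 0, so reconstructed
   velocities u = hu * one_over_h stay bounded in nearly dry states. For
   example, with hEps = 1e-5 and h = 1e-8 it evaluates to about 2e2 instead of
   the raw 1/h = 1e8. */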
153 
154  inline void calculateCFL(const double &elementDiameter, const double &g,
155  const double &h, const double &hu, const double &hv,
156  const double hEps, double &cfl) {
157  double cflx, cfly, c = sqrt(fmax(g * hEps, g * h));
158  const double u = 2 * h / (h * h + std::pow(fmax(h, hEps), 2)) * hu;
159  const double v = 2 * h / (h * h + std::pow(fmax(h, hEps), 2)) * hv;
160 
161  if (u > 0.0)
162  cflx = (u + c) / elementDiameter;
163  else
164  cflx = fabs(u - c) / elementDiameter;
165 
166  if (v > 0.0)
167  cfly = (v + c) / elementDiameter;
168  else
169  cfly = fabs(v - c) / elementDiameter;
170  cfl = sqrt(cflx * cflx + cfly * cfly); // hack, conservative estimate
171  }
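/* Note that both branches above reduce to (|u| + c) / elementDiameter (and
   analogously for v), so cfl is the usual per-direction acoustic estimate,
   combined conservatively through the Euclidean norm as noted in the comment
   above. */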
172 
173  void convexLimiting(arguments_dict &args) {
174  const int numDOFs = args.scalar<int>("numDOFs");
175  const xt::pyarray<int> &csrRowIndeces_DofLoops =
176  args.array<int>("csrRowIndeces_DofLoops");
177  const xt::pyarray<int> &csrColumnOffsets_DofLoops =
178  args.array<int>("csrColumnOffsets_DofLoops");
179  const xt::pyarray<double> &MassMatrix = args.array<double>("MassMatrix");
180  const xt::pyarray<double> &lumped_mass_matrix =
181  args.array<double>("lumped_mass_matrix");
182  const double dt = args.scalar<double>("dt");
183  const xt::pyarray<double> &h_old = args.array<double>("h_old");
184  const xt::pyarray<double> &hu_old = args.array<double>("hu_old");
185  const xt::pyarray<double> &hv_old = args.array<double>("hv_old");
186  const xt::pyarray<double> &heta_old = args.array<double>("heta_old");
187  const xt::pyarray<double> &hw_old = args.array<double>("hw_old");
188  const xt::pyarray<double> &hbeta_old = args.array<double>("hbeta_old");
189  const xt::pyarray<double> &b_dof = args.array<double>("b_dof");
190  xt::pyarray<double> &limited_hnp1 = args.array<double>("limited_hnp1");
191  xt::pyarray<double> &limited_hunp1 = args.array<double>("limited_hunp1");
192  xt::pyarray<double> &limited_hvnp1 = args.array<double>("limited_hvnp1");
193  xt::pyarray<double> &limited_hetanp1 =
194  args.array<double>("limited_hetanp1");
195  xt::pyarray<double> &limited_hwnp1 = args.array<double>("limited_hwnp1");
196  xt::pyarray<double> &limited_hbetanp1 =
197  args.array<double>("limited_hbetanp1");
198  const double hEps = args.scalar<double>("hEps");
199  xt::pyarray<double> &hLow = args.array<double>("hLow");
200  xt::pyarray<double> &huLow = args.array<double>("huLow");
201  xt::pyarray<double> &hvLow = args.array<double>("hvLow");
202  xt::pyarray<double> &hetaLow = args.array<double>("hetaLow");
203  xt::pyarray<double> &hwLow = args.array<double>("hwLow");
204  xt::pyarray<double> &hbetaLow = args.array<double>("hbetaLow");
205  const xt::pyarray<double> &h_min = args.array<double>("h_min");
206  const xt::pyarray<double> &h_max = args.array<double>("h_max");
207  const xt::pyarray<double> &heta_min = args.array<double>("heta_min");
208  const xt::pyarray<double> &heta_max = args.array<double>("heta_max");
209  const xt::pyarray<double> &kin_max = args.array<double>("kin_max");
210  const double KE_tiny = args.scalar<double>("KE_tiny");
211  const xt::pyarray<double> &SourceTerm_h =
212  args.array<double>("SourceTerm_h");
213  const xt::pyarray<double> &SourceTerm_hu =
214  args.array<double>("SourceTerm_hu");
215  const xt::pyarray<double> &SourceTerm_hv =
216  args.array<double>("SourceTerm_hv");
217  const xt::pyarray<double> &SourceTerm_heta =
218  args.array<double>("SourceTerm_heta");
219  const xt::pyarray<double> &SourceTerm_hw =
220  args.array<double>("SourceTerm_hw");
221  const xt::pyarray<double> &SourceTerm_hbeta =
222  args.array<double>("SourceTerm_hbeta");
223  const xt::pyarray<double> &global_entropy_residual =
224  args.array<double>("global_entropy_residual");
225  const xt::pyarray<double> &Cx = args.array<double>("Cx");
226  const xt::pyarray<double> &Cy = args.array<double>("Cy");
227  const xt::pyarray<double> &CTx = args.array<double>("CTx");
228  const xt::pyarray<double> &CTy = args.array<double>("CTy");
229  const xt::pyarray<double> &RHS_high_h = args.array<double>("RHS_high_h");
230  const xt::pyarray<double> &RHS_high_hu = args.array<double>("RHS_high_hu");
231  const xt::pyarray<double> &RHS_high_hv = args.array<double>("RHS_high_hv");
232  const xt::pyarray<double> &RHS_high_heta =
233  args.array<double>("RHS_high_heta");
234  const xt::pyarray<double> &RHS_high_hw = args.array<double>("RHS_high_hw");
235  const xt::pyarray<double> &RHS_high_hbeta =
236  args.array<double>("RHS_high_hbeta");
237  const xt::pyarray<double> &extendedSourceTerm_hu =
238  args.array<double>("extendedSourceTerm_hu");
239  const xt::pyarray<double> &extendedSourceTerm_hv =
240  args.array<double>("extendedSourceTerm_hv");
241  const xt::pyarray<double> &thetaj_inv = args.array<double>("thetaj_inv");
242  const double g = args.scalar<double>("g");
243  const xt::pyarray<double> &inverse_mesh =
244  args.array<double>("inverse_mesh");
245 
246  // Create FCT component matrices in vector form
247  std::valarray<double> FCT_h(0., Cx.size()), FCT_hu(0., Cx.size()),
248  FCT_hv(0., Cx.size()), FCT_heta(0., Cx.size()), FCT_hw(0., Cx.size()),
249  FCT_hbeta(0., Cx.size());
250 
252  // Loop to define FCT matrices for each component //
254  int ij = 0;
255  for (int i = 0; i < numDOFs; i++) {
256  // Read un at ith node
257  const double hi = h_old[i];
258  const double one_over_hi =
259  2. * hi / (hi * hi + std::pow(fmax(hi, hEps), 2)); // hEps
260  const double hui = hu_old[i];
261  const double ui = hui * one_over_hi;
262  const double hvi = hv_old[i];
263  const double vi = hvi * one_over_hi;
264  const double hetai = heta_old[i];
265  const double hwi = hw_old[i];
266  const double hbetai = hbeta_old[i];
267  const double Zi = b_dof[i];
268  const double mi = lumped_mass_matrix[i];
269  const double inv_meshSizei = inverse_mesh[i];
270 
271  // LOOP OVER THE SPARSITY PATTERN (j-LOOP)//
272  for (int offset = csrRowIndeces_DofLoops[i];
273  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
274  int j = csrColumnOffsets_DofLoops[offset];
275 
276  if (j == i) {
277  FCT_h[ij] = 0.;
278  FCT_hu[ij] = 0.;
279  FCT_hv[ij] = 0.;
280  FCT_heta[ij] = 0.;
281  FCT_hw[ij] = 0.;
282  FCT_hbeta[ij] = 0.;
283  } else {
284  // Read un stuff at jth node
285  const double hj = h_old[j];
286  const double one_over_hj =
287  2. * hj / (hj * hj + std::pow(fmax(hj, hEps), 2));
288  const double huj = hu_old[j];
289  const double uj = huj * one_over_hj;
290  const double hvj = hv_old[j];
291  const double vj = hvj * one_over_hj;
292  const double hetaj = heta_old[j];
293  const double hwj = hw_old[j];
294  const double hbetaj = hbeta_old[j];
295  const double Zj = b_dof[j];
296  const double mj = lumped_mass_matrix[j];
297  const double inv_meshSizej = inverse_mesh[j];
298 
299  // Compute star states
300  const double hStarij = fmax(0., hi + Zi - fmax(Zi, Zj));
301  const double hStarji = fmax(0., hj + Zj - fmax(Zi, Zj));
302  //
303  const double hStar_ratio_i = hStarij * one_over_hi;
304  const double hStar_ratio_j = hStarji * one_over_hj;
305  //
306  const double huStarij = hui * hStar_ratio_i;
307  const double hvStarij = hvi * hStar_ratio_i;
308  const double hetaStarij = hetai * std::pow(hStar_ratio_i, 2);
309  const double hwStarij = hwi * hStar_ratio_i;
310  const double hbetaStarij = hbetai * hStar_ratio_i;
311  //
312  const double huStarji = huj * hStar_ratio_j;
313  const double hvStarji = hvj * hStar_ratio_j;
314  const double hetaStarji = hetaj * std::pow(hStar_ratio_j, 2);
315  const double hwStarji = hwj * hStar_ratio_j;
316  const double hbetaStarji = hbetaj * hStar_ratio_j;
317 
318  const double b_ij = 0. - MassMatrix[ij] / mj;
319  const double b_ji = 0. - MassMatrix[ij] / mi;
320 
321  /* Redefine high-order viscosity. Note that this is not so expensive
322  since we are just accessing and multiplying. */
323  const double cij_norm = sqrt(Cx[ij] * Cx[ij] + Cy[ij] * Cy[ij]);
324  const double cji_norm = sqrt(CTx[ij] * CTx[ij] + CTy[ij] * CTy[ij]);
325  const double nxij = Cx[ij] / cij_norm;
326  const double nyij = Cy[ij] / cij_norm;
327  const double nxji = CTx[ij] / cji_norm;
328  const double nyji = CTy[ij] / cji_norm;
329 
330  const double muijL = fmax(std::abs(ui * Cx[ij] + vi * Cy[ij]),
331  std::abs(uj * CTx[ij] + vj * CTy[ij]));
332  double dijL = fmax(
333  maxWaveSpeedSharpInitialGuess(g, nxij, nyij, hi, hui, hvi, hetai,
334  inv_meshSizei, hj, huj, hvj, hetaj,
335  inv_meshSizej, hEps) *
336  cij_norm,
337  maxWaveSpeedSharpInitialGuess(g, nxji, nyji, hj, huj, hvj, hetaj,
338  inv_meshSizej, hi, hui, hvi, hetai,
339  inv_meshSizei, hEps) *
340  cji_norm);
341 
342  // Take max with muij
343  dijL = std::max(dijL, muijL);
344 
345  // Define high-order graph viscosity coefficients
346  const double dEVij =
347  std::max(global_entropy_residual[i], global_entropy_residual[j]);
348 
349  const double dijH = std::min(dijL, dEVij);
350  const double muijH = std::min(muijL, dEVij);
351 
352  const double diff_dij_muij = (dijH - dijL) - (muijH - muijL);
353  const double diff_muij = (muijH - muijL);
354 
355  // h
356  double viscous_terms =
357  diff_dij_muij * (hStarji - hStarij) + diff_muij * (hj - hi);
358  FCT_h[ij] = dt * (b_ij * RHS_high_h[j] - b_ji * RHS_high_h[i] +
359  viscous_terms);
360  // hu
361  viscous_terms =
362  diff_dij_muij * (huStarji - huStarij) + diff_muij * (huj - hui);
363  FCT_hu[ij] = dt * (b_ij * RHS_high_hu[j] - b_ji * RHS_high_hu[i] +
364  viscous_terms);
365  // hv
366  viscous_terms =
367  diff_dij_muij * (hvStarji - hvStarij) + diff_muij * (hvj - hvi);
368  FCT_hv[ij] = dt * (b_ij * RHS_high_hv[j] - b_ji * RHS_high_hv[i] +
369  viscous_terms);
370  // heta
371  viscous_terms = diff_dij_muij * (hetaStarji - hetaStarij) +
372  diff_muij * (hetaj - hetai);
373  FCT_heta[ij] = dt * (b_ij * RHS_high_heta[j] -
374  b_ji * RHS_high_heta[i] + viscous_terms);
375  // hw
376  viscous_terms =
377  diff_dij_muij * (hwStarji - hwStarij) + diff_muij * (hwj - hwi);
378  FCT_hw[ij] = dt * (b_ij * RHS_high_hw[j] - b_ji * RHS_high_hw[i] +
379  viscous_terms);
380  // hbeta
381  viscous_terms = diff_dij_muij * (hbetaStarji - hbetaStarij) +
382  diff_muij * (hbetaj - hbetai);
383  FCT_hbeta[ij] = dt * (b_ij * RHS_high_hbeta[j] -
384  b_ji * RHS_high_hbeta[i] + viscous_terms);
385  }
386 
387  // UPDATE ij //
388  ij += 1;
389  } // j loop ends here
390  } // i loop ends here
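/* Each FCT entry assembled above has the same generic form per component U in
   {h, hu, hv, heta, hw, hbeta} (a summary sketch, not an independent
   definition):

     FCT_U[ij] = dt * ( b_ij * RHS_high_U[j] - b_ji * RHS_high_U[i]
                        + ((dH_ij - dL_ij) - (muH_ij - muL_ij)) * (UStar_ji - UStar_ij)
                        + (muH_ij - muL_ij) * (U_j - U_i) ),

   with b_ij = -MassMatrix[ij] / m_j. Loosely, this is the antidiffusive
   correction between the high-order and low-order (bar state) updates,
   written edge by edge and limited in the loops that follow. */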
391 
393  // Main loop to define limiters and compute limited solution //////
395 
396  // Create Lij_array and initialize with 1
397  std::valarray<double> Lij_array(1., Cx.size());
398 
399  // define tolerance for limiting
400  const double eps = 1e-14;
401 
402  /* Loop over # of limiting iterations */
403  for (int limit_iter = 0; limit_iter < LIMITING_ITERATION; limit_iter++) {
404 
405  /* Loop to compute limiter l^j_i and l^i_j */
406  ij = 0;
407  for (int i = 0; i < numDOFs; i++) {
408 
409  // Get low order solution at ith node
410  const double hLowi = hLow[i];
411  const double huLowi = huLow[i];
412  const double hvLowi = hvLow[i];
413  const double hetaLowi = hetaLow[i];
414  const double kinMaxi = kin_max[i];
415  const double mi = lumped_mass_matrix[i];
416 
417  // LOOP OVER THE SPARSITY PATTERN (j-LOOP)
418  for (int offset = csrRowIndeces_DofLoops[i];
419  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
420  int j = csrColumnOffsets_DofLoops[offset];
421 
422  Lij_array[ij] = 1.;
423 
424  // Get low order solution at jth node
425  const double hLowj = hLow[j];
426  const double huLowj = huLow[j];
427  const double hvLowj = hvLow[j];
428  const double hetaLowj = hetaLow[j];
429  const double kinMaxj = kin_max[j];
430  const double mj = lumped_mass_matrix[j];
431 
432  // Compute Pij matrix and Pji
433  double denom = 1. / (mi * thetaj_inv[i]);
434  const double P_h = FCT_h[ij] * denom;
435  const double P_hu = FCT_hu[ij] * denom;
436  const double P_hv = FCT_hv[ij] * denom;
437  const double P_heta = FCT_heta[ij] * denom;
438 
439  denom = 1. / (mj * thetaj_inv[j]);
440  const double P_h_tr = -FCT_h[ij] * denom;
441  const double P_hu_tr = -FCT_hu[ij] * denom;
442  const double P_hv_tr = -FCT_hv[ij] * denom;
443  const double P_heta_tr = -FCT_heta[ij] * denom;
444 
445  /* h limiting -- to define l_ji_h */
446  double l_ji_h = Lij_array[ij];
447  {
448  const double denominator = 1. / (std::abs(P_h) + eps * h_max[i]);
449 
450  // Define limiter here
451  if (hLowi + P_h < h_min[i]) {
452  l_ji_h = std::min((std::abs(h_min[i] - hLowi) + eps * h_min[i]) *
453  denominator,
454  1.);
455  } else if (h_max[i] < hLowi + P_h) {
456  l_ji_h = std::min((std::abs(h_max[i] - hLowi) + eps * h_min[i]) *
457  denominator,
458  1.);
459  }
460 
461  // Set limiter to 0 if water depth is close to 0
462  l_ji_h = (hLowi <= hEps) ? 0. : l_ji_h;
463 
464  /* Box limiter to be safe? */
465  l_ji_h = std::min(l_ji_h, 1.);
466  l_ji_h = std::max(l_ji_h, 0.);
467 
468 #if IF_LIMITING_DEBUGGING
469  if ((hLowi + l_ji_h * P_h) - h_min[i] < -1e-12) {
470  std::cout << " MAJOR BUG 1a " << std::setprecision(15) << " \n "
471  << " Diff = " << (hLowi + l_ji_h * P_h) - h_min[i]
472  << " \n "
473  << " hLowi = " << hLowi << " \n "
474  << " h_min = " << h_min[i] << " \n "
475  << " h_max = " << h_max[i] << " \n "
476  << " l_ji_h = " << l_ji_h << std::endl;
477  std::cout << "LIMIT_ITER " << limit_iter << std::endl;
478  abort();
479  }
480 
481  if (h_max[i] - (hLowi + l_ji_h * P_h) < -1e-12) {
482  std::cout << " MAJOR BUG 1b " << std::setprecision(15) << " \n "
483  << " Diff = " << h_max[i] - (hLowi + l_ji_h * P_h)
484  << " \n "
485  << " Soln = " << (hLowi + P_h) << " \n "
486  << " hLowi = " << hLowi << " \n "
487  << " h_min = " << h_min[i] << " \n "
488  << " h_max = " << h_max[i] << " \n "
489  << " P_h = " << P_h << " \n "
490  << " l_ji_h = " << l_ji_h << std::endl;
491  std::cout << "LIMIT_ITER " << limit_iter << std::endl;
492  abort();
493  }
494 #endif
495  }
496 
497  /* h limiting -- to define l_ij_h */
498  double l_ij_h = Lij_array[ij];
499  {
500  const double denominator_test =
501  1. / (std::abs(P_h_tr) + eps * h_max[j]);
502 
503  if (hLowj + P_h_tr < h_min[j]) {
504  l_ij_h = std::min((std::abs(h_min[j] - hLowj) + eps * h_min[j]) *
505  denominator_test,
506  1.);
507  } else if (h_max[j] < hLowj + P_h_tr) {
508  l_ij_h = std::min((std::abs(h_max[j] - hLowj) + eps * h_min[j]) *
509  denominator_test,
510  1.);
511  }
512 
513  // set limiter to 0 if water depth is close to 0
514  l_ij_h = (hLowj <= hEps) ? 0. : l_ij_h;
515 
516  /* Box limiter to be safe? */
517  l_ij_h = std::min(l_ij_h, 1.);
518  l_ij_h = std::max(l_ij_h, 0.);
519 
520 #if IF_LIMITING_DEBUGGING
521  if ((hLowj + l_ij_h * P_h_tr) - h_min[j] < -1e-12) {
522  std::cout << " MAJOR BUG 2a " << std::setprecision(15) << " \n "
523  << " Diff = " << h_min[j] - (hLowj + l_ij_h * P_h_tr)
524  << " \n "
525  << " Soln = " << (hLowj + P_h_tr) << " \n "
526  << " hLowj = " << hLowj << " \n "
527  << " h_min = " << h_min[j] << " \n "
528  << " h_max = " << h_max[j] << " \n "
529  << " P_h_tr = " << P_h_tr << " \n "
530  << " l_ij_h = " << l_ij_h << std::endl;
531  std::cout << "LIMIT_ITER " << limit_iter << std::endl;
532  abort();
533  }
534 
535  if (h_max[j] - (hLowj + l_ij_h * P_h_tr) < -1e-12) {
536  std::cout << " MAJOR BUG 2b " << std::setprecision(15) << " \n "
537  << " Diff = " << h_max[j] - (hLowj + l_ij_h * P_h_tr)
538  << " \n "
539  << " Soln = " << (hLowj + P_h_tr) << " \n "
540  << " hLowj = " << hLowj << " \n "
541  << " h_min = " << h_min[j] << " \n "
542  << " h_max = " << h_max[j] << " \n "
543  << " P_h_tr = " << P_h_tr << " \n "
544  << " l_ij_h = " << l_ij_h << std::endl;
545  std::cout << "LIMIT_ITER " << limit_iter << std::endl;
546  abort();
547  }
548 #endif
549  }
550 
551  /* q1 limiting -- to define l_ji_q1 */
552  double l_ji_q1 = l_ji_h;
553  {
554  const double denominator =
555  1. / (std::abs(P_heta) + eps * heta_max[i]);
556 
557  // Define limiter here
558  if (hetaLowi + P_heta < heta_min[i]) {
559  l_ji_q1 = std::min(
560  (std::abs(heta_min[i] - hetaLowi) + eps * heta_min[i]) *
561  denominator,
562  1.);
563  } else if (heta_max[i] < hetaLowi + P_heta) {
564  l_ji_q1 = std::min(
565  (std::abs(heta_max[i] - hetaLowi) + eps * heta_min[i]) *
566  denominator,
567  1.);
568  }
569 
570  // set limiter to 0 if water depth is close to 0
571  l_ji_q1 = (hLowi <= hEps) ? 0. : l_ji_q1;
572 
573  // get min of l_ji_q1 and previous limiter
574  l_ji_q1 = std::min(l_ji_q1, l_ji_h);
575 
576  /* Box limiter to be safe? */
577  l_ji_q1 = std::min(l_ji_q1, 1.);
578  l_ji_q1 = std::max(l_ji_q1, 0.);
579 
580 #if IF_LIMITING_DEBUGGING
581  if ((hetaLowi + l_ji_q1 * P_heta) - heta_min[i] < -hEps * hEps) {
582  std::cout << " MAJOR BUG 3a " << std::setprecision(15) << " \n "
583  << "New soln " << hetaLowi + l_ji_q1 * P_heta << " \n "
584  << "hetaLowi " << hetaLowi << " \n "
585  << "heta_min " << heta_min[i] << " \n "
586  << "heta_max " << heta_max[i] << " \n "
587  << "l_ji_q1 " << l_ji_q1 << " \n "
588  << "test hi " << hLowi << std::endl;
589  if (heta_max[i] > -hEps)
590  abort();
591  }
592 
593  if (heta_max[i] - (hetaLowi + l_ji_q1 * P_heta) < -hEps * hEps) {
594  std::cout << " MAJOR BUG 3b " << std::setprecision(15) << " \n "
595  << "New soln " << hetaLowi + l_ji_q1 * P_heta << " \n "
596  << "hetaLowi " << hetaLowi << " \n "
597  << "heta_min " << heta_min[i] << " \n "
598  << "heta_max " << heta_max[i] << " \n "
599  << "l_ji_q1 " << l_ji_q1 << " \n "
600  << "test hi " << hLowi << std::endl;
601  if (heta_max[i] > -hEps)
602  abort();
603  }
604 #endif
605  }
606 
607  /* q1 limiting -- to define l_ij_q1 */
608  double l_ij_q1 = l_ij_h;
609  {
610  const double denominator =
611  1. / (std::abs(P_heta_tr) + eps * heta_max[j]);
612 
613  if (hetaLowj + P_heta_tr < heta_min[j]) {
614  l_ij_q1 = std::min(
615  (std::abs(heta_min[j] - hetaLowj) + eps * heta_min[j]) *
616  denominator,
617  1.);
618  } else if (heta_max[j] < hetaLowj + P_heta_tr) {
619  l_ij_q1 = std::min(
620  (std::abs(heta_max[j] - hetaLowj) + eps * heta_min[j]) *
621  denominator,
622  1.);
623  }
624 
625  // set limiter to 0 if water depth is close to 0
626  l_ij_q1 = (hLowj <= hEps) ? 0. : l_ij_q1;
627 
628  // get min of l_ij_q1 and previous limiter
629  l_ij_q1 = std::min(l_ij_q1, l_ij_h);
630 
631  /* Box limiter to be safe? */
632  l_ij_q1 = std::min(l_ij_q1, 1.0);
633  l_ij_q1 = std::max(l_ij_q1, 0.0);
634  }
635 
636 #if IF_LIMITING_DEBUGGING
637  if ((hetaLowj + l_ij_q1 * P_heta_tr) - heta_min[j] < -hEps * hEps) {
638  std::cout << " MAJOR BUG 4a " << std::setprecision(15) << " \n "
639  << "New soln " << hetaLowj + l_ij_q1 * P_heta_tr << " \n "
640  << "hetaLowj " << hetaLowj << " \n "
641  << "heta_min " << heta_min[j] << " \n "
642  << "heta_max " << heta_max[j] << " \n "
643  << "l_ij_q1 " << l_ij_q1 << " \n "
644  << "test hj " << hLowj << std::endl;
645  if (heta_max[j] > -hEps)
646  abort();
647  }
648 
649  if (heta_max[j] - (hetaLowj + l_ij_q1 * P_heta_tr) < -hEps * hEps) {
650  std::cout << " MAJOR BUG 4b " << std::setprecision(15) << " \n "
651  << "New soln " << hetaLowj + l_ij_q1 * P_heta_tr << " \n "
652  << "hetaLowj " << hetaLowj << " \n "
653  << "heta_min " << heta_min[j] << " \n "
654  << "heta_max " << heta_max[j] << " \n "
655  << "l_ij_q1 " << l_ij_q1 << " \n "
656  << "test hj " << hLowj << std::endl;
657  if (heta_max[j] > -hEps)
658  abort();
659  }
660 #endif
661 
662  /* kinetic energy limiting -- to define l_ji_K */
663  double l_tmp = 0.;
664  double l_ji_K = l_ji_q1;
665  {
666  // We first check if current l_ji_K is a good state
667  const double h_r = hLowi + l_ji_K * P_h;
668  const double hu_r = huLowi + l_ji_K * P_hu;
669  const double hv_r = hvLowi + l_ji_K * P_hv;
670  const double psi =
671  kin_max[i] * h_r - 0.5 * (hu_r * hu_r + hv_r * hv_r);
672 
673  l_tmp = (psi > -KE_tiny) ? l_ji_K : l_tmp;
674 
675  const double ai = -0.5 * (P_hu * P_hu + P_hv * P_hv);
676  const double ai_nudged = std::min(ai, -KE_tiny);
677  const double bi = kinMaxi * P_h - (huLowi * P_hu + hvLowi * P_hv);
678  const double ci =
679  hLowi * kinMaxi - 0.5 * (huLowi * huLowi + hvLowi * hvLowi);
680 
681  const double delta_i = bi * bi - 4. * ai * ci;
682  const double root_i =
683  0.5 / ai_nudged * (-bi - std::sqrt(std::abs(delta_i)));
684 
685  // define l_ji_K
686  l_ji_K = (root_i > 0.) ? std::min(root_i, l_ji_K)
687  : std::min(l_tmp, l_ji_K);
688 
689  /* if previous bound was already satisfied, we should take
690  l_tmp instead of l_ji_K */
691  l_tmp = std::max(l_tmp, l_ji_K);
692 
693  // If we are in a "dry state", limiter should be 0
694  l_tmp = (hLowi * kinMaxi <= KE_tiny) ? 0. : l_tmp;
695 
696  /* Then box to be safe */
697  l_tmp = std::min(l_tmp, 1.);
698  l_tmp = std::max(l_tmp, 0.);
699  }
700 
701  /* Define final l_ji_K */
702  l_ji_K = std::min(l_tmp, l_ji_q1);
703 
704  /* kinetic energy limiting -- to define l_ij_K */
705  l_tmp = 0.;
706  double l_ij_K = l_ij_q1;
707  {
708  // We first check if current l_ij_K is a good state
709  const double h_r = hLowj + l_ij_K * P_h_tr;
710  const double hu_r = huLowj + l_ij_K * P_hu_tr;
711  const double hv_r = hvLowj + l_ij_K * P_hv_tr;
712  const double psi =
713  kinMaxj * h_r - 0.5 * (hu_r * hu_r + hv_r * hv_r);
714 
715  l_tmp = (psi > -KE_tiny) ? l_ij_K : l_tmp;
716 
717  const double aj = -0.5 * (P_hu_tr * P_hu_tr + P_hv_tr * P_hv_tr);
718  const double aj_nudged = std::min(aj, -KE_tiny);
719  const double bj =
720  kinMaxj * P_h_tr - (huLowj * P_hu_tr + hvLowj * P_hv_tr);
721  const double cj =
722  hLowj * kinMaxj - 0.5 * (huLowj * huLowj + hvLowj * hvLowj);
723 
724  const double delta_j = bj * bj - 4. * aj * cj;
725  const double root_j =
726  0.5 / aj_nudged * (-bj - std::sqrt(std::abs(delta_j)));
727 
728  // define l_ij_K
729  l_ij_K = (root_j > 0.) ? std::min(root_j, l_ij_K)
730  : std::min(l_tmp, l_ij_K);
731 
732  /* if previous bound was already satisfied, we should take l_tmp
733  instead of l_ij_K */
734  l_tmp = std::max(l_tmp, l_ij_K);
735 
736  // If we are in a "dry state", limiter should be 0
737  l_tmp = (hLowj * kinMaxj <= KE_tiny) ? 0. : l_tmp;
738 
739  /* Box limiter */
740  l_tmp = std::min(l_tmp, 1.);
741  l_tmp = std::max(l_tmp, 0.);
742  }
743 
744  /* Get final l_ij_K*/
745  l_ij_K = std::min(l_tmp, l_ij_q1);
746 
747  /* Then we get the final limiter lij */
748  Lij_array[ij] = std::min(l_ji_K, l_ij_K);
749 
750 #if IF_LIMITING_DEBUGGING
751  if (Lij_array[ij] > 1. || Lij_array[ij] < 0.) {
752  std::cout << "\n Problem with limiter! \n " << Lij_array[ij]
753  << "\n Aborting! " << std::endl;
754  }
755 #endif
756  ij += 1;
757  } // end j loop
758  } // end i loop
759 
760  /* Loop to define limited solution */
761  ij = 0;
762  for (int i = 0; i < numDOFs; i++) {
763 
764  const double one_over_mi = 1. / lumped_mass_matrix[i];
765  double ith_Limiter_times_FCT_matrix1 = 0.;
766  double ith_Limiter_times_FCT_matrix2 = 0.;
767  double ith_Limiter_times_FCT_matrix3 = 0.;
768  double ith_Limiter_times_FCT_matrix4 = 0.;
769  double ith_Limiter_times_FCT_matrix5 = 0.;
770  double ith_Limiter_times_FCT_matrix6 = 0.;
771 
772  // LOOP OVER THE SPARSITY PATTERN (j-LOOP)//
773  for (int offset = csrRowIndeces_DofLoops[i];
774  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
775  int j = csrColumnOffsets_DofLoops[offset];
776 
777  // COMPUTE LIMITED FLUX //
778  ith_Limiter_times_FCT_matrix1 += Lij_array[ij] * FCT_h[ij];
779  ith_Limiter_times_FCT_matrix2 += Lij_array[ij] * FCT_hu[ij];
780  ith_Limiter_times_FCT_matrix3 += Lij_array[ij] * FCT_hv[ij];
781  ith_Limiter_times_FCT_matrix4 += Lij_array[ij] * FCT_heta[ij];
782  ith_Limiter_times_FCT_matrix5 += Lij_array[ij] * FCT_hw[ij];
783  ith_Limiter_times_FCT_matrix6 += Lij_array[ij] * FCT_hbeta[ij];
784 
785  // update ij
786  ij += 1;
787  } // end j loop
788 
789  // then we add lij*Aij to uLow
790  hLow[i] += one_over_mi * ith_Limiter_times_FCT_matrix1;
791  huLow[i] += one_over_mi * ith_Limiter_times_FCT_matrix2;
792  hvLow[i] += one_over_mi * ith_Limiter_times_FCT_matrix3;
793  hetaLow[i] += one_over_mi * ith_Limiter_times_FCT_matrix4;
794  hwLow[i] += one_over_mi * ith_Limiter_times_FCT_matrix5;
795  hbetaLow[i] += one_over_mi * ith_Limiter_times_FCT_matrix6;
796 
797 #if IF_LIMITING_DEBUGGING
798  if (hLow[i] < -hEps) {
799  std::cout
800  << " \n New intermediate/limited water depth is negative! \n"
801  << "new hLow[i] = " << hLow[i] << " \n "
802  << "-hEps = " << -hEps << "\n"
803  << "h_min = " << h_min[i] << "\n"
804  << "h_max = " << h_max[i] << "\n"
805  << "... Aborting! \n"
806  << std::endl;
807  abort();
808  }
809  if (h_max[i] - hLow[i] < -1e-12 || hLow[i] - h_min[i] < -1e-12) {
810  std::cout
811  << std::setprecision(15)
812  << " --- We have a major problem (h limiting bounds) --- \n "
813  << "i = " << i << " \n "
814  << "hLow[i] = " << hLow[i] << " \n "
815  << "h_min[i] = " << h_min[i] << " \n "
816  << "h_max[i] = " << h_max[i] << " \n "
817  << "Diff max = " << h_max[i] - hLow[i] << " \n "
818  << "Diff min = " << hLow[i] - h_min[i] << " \n " << std::endl;
819  std::cout << "LIMIT_ITER " << limit_iter << std::endl;
820  abort();
821  }
822 
823  if (heta_max[i] - hetaLow[i] < -hEps * hEps ||
824  hetaLow[i] - heta_min[i] < -hEps * hEps) {
825  std::cout
826  << std::setprecision(15)
827  << " --- We have a major problem (heta limiting bounds) --- \n "
829  << "hetaLow[i] = " << hetaLow[i] << " \n "
830  << "heta_min[i] = " << heta_min[i] << " \n "
831  << "heta_max[i] = " << heta_max[i] << " \n "
832  << "Diff max = " << heta_max[i] - hetaLow[i] << " \n "
833  << "Diff min = " << hetaLow[i] - heta_min[i] << " \n "
834  << std::endl;
835  abort();
836  }
837 #endif
838  } // end i loop
839 
840  // update FCT matrices as Fct = (1 - Lij)*Fct
841  FCT_h = (1. - Lij_array) * FCT_h;
842  FCT_hu = (1. - Lij_array) * FCT_hu;
843  FCT_hv = (1. - Lij_array) * FCT_hv;
844  FCT_heta = (1. - Lij_array) * FCT_heta;
845  FCT_hw = (1. - Lij_array) * FCT_hw;
846  FCT_hbeta = (1. - Lij_array) * FCT_hbeta;
847  } // end loop for limiting iteration
848 
849  /* Update final solution */
850  for (int i = 0; i < numDOFs; i++) {
851 
852  const double one_over_mi = 1. / lumped_mass_matrix[i];
853 
854  limited_hnp1[i] = hLow[i] + dt * one_over_mi * SourceTerm_h[i];
855  limited_hunp1[i] = huLow[i] + dt * one_over_mi * extendedSourceTerm_hu[i];
856  limited_hvnp1[i] = hvLow[i] + dt * one_over_mi * extendedSourceTerm_hv[i];
857  limited_hetanp1[i] = hetaLow[i] + dt * one_over_mi * SourceTerm_heta[i];
858  limited_hwnp1[i] = hwLow[i] + dt * one_over_mi * SourceTerm_hw[i];
859  limited_hbetanp1[i] =
860  hbetaLow[i] + dt * one_over_mi * SourceTerm_hbeta[i];
861 
862  if (limited_hnp1[i] < -hEps) {
863  std::cout << " \n "
864  << " !!!! Limited water height is negative: !!!! \n"
865  << " hLim = " << limited_hnp1[i] << "\n"
866  << " hEps = " << hEps << "\n"
867  << " h_min = " << h_min[i] << "\n"
868  << " h_max = " << h_max[i] << "\n"
869  << " !!!! ABORTING !!!! \n"
870  << std::endl;
871  abort();
872  } else {
873  // clean up the limited solution from round-off error
874  if (limited_hnp1[i] < hEps) {
875  limited_hnp1[i] = 0.;
876  }
877 
878  const double aux = fmax(limited_hnp1[i], hEps);
879  limited_hunp1[i] *= 2. * std::pow(limited_hnp1[i], VEL_FIX_POWER) /
880  (std::pow(limited_hnp1[i], VEL_FIX_POWER) +
881  std::pow(aux, VEL_FIX_POWER));
882  limited_hvnp1[i] *= 2. * std::pow(limited_hnp1[i], VEL_FIX_POWER) /
883  (std::pow(limited_hnp1[i], VEL_FIX_POWER) +
884  std::pow(aux, VEL_FIX_POWER));
885  limited_hwnp1[i] *= 2. * std::pow(limited_hnp1[i], VEL_FIX_POWER) /
886  (std::pow(limited_hnp1[i], VEL_FIX_POWER) +
887  std::pow(aux, VEL_FIX_POWER));
888  limited_hbetanp1[i] *= 2. * std::pow(limited_hnp1[i], VEL_FIX_POWER) /
889  (std::pow(limited_hnp1[i], VEL_FIX_POWER) +
890  std::pow(aux, VEL_FIX_POWER));
891  }
892  }
893 
894  } // end convex limiting function
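/* Summary of the update performed by this function (a sketch in the notation
   above): with m_i the lumped mass, FCT_U the correction matrices assembled
   in the first loop and L_ij in [0, 1] the limiters,

     ULow_i <- ULow_i + (1/m_i) * sum_j L_ij * FCT_U[ij],

   repeated LIMITING_ITERATION times with FCT_U <- (1 - L_ij) * FCT_U between
   passes, and finally

     U_i^{n+1} = ULow_i + (dt/m_i) * SourceTerm_U[i]

   (with the extended source for hu, hv), followed by the VEL_FIX_POWER
   velocity fix at nearly dry nodes. */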
895 
896  double calculateEdgeBasedCFL(arguments_dict &args) {
897  const double g = args.scalar<double>("g");
898  const int numDOFsPerEqn = args.scalar<int>("numDOFsPerEqn");
899  const xt::pyarray<double> &lumped_mass_matrix =
900  args.array<double>("lumped_mass_matrix");
901  const xt::pyarray<double> &h_dof_old = args.array<double>("h_dof_old");
902  const xt::pyarray<double> &hu_dof_old = args.array<double>("hu_dof_old");
903  const xt::pyarray<double> &hv_dof_old = args.array<double>("hv_dof_old");
904  const xt::pyarray<double> &heta_dof_old =
905  args.array<double>("heta_dof_old");
906  const xt::pyarray<int> &csrRowIndeces_DofLoops =
907  args.array<int>("csrRowIndeces_DofLoops");
908  const xt::pyarray<int> &csrColumnOffsets_DofLoops =
909  args.array<int>("csrColumnOffsets_DofLoops");
910  const double hEps = args.scalar<double>("hEps");
911  const xt::pyarray<double> &Cx = args.array<double>("Cx");
912  const xt::pyarray<double> &Cy = args.array<double>("Cy");
913  const xt::pyarray<double> &CTx = args.array<double>("CTx");
914  const xt::pyarray<double> &CTy = args.array<double>("CTy");
915  const xt::pyarray<double> &inverse_mesh =
916  args.array<double>("inverse_mesh");
917  xt::pyarray<double> &edge_based_cfl = args.array<double>("edge_based_cfl");
918 
919  double max_edge_based_cfl = 0.;
920  double dLowij = 0.;
921 
922  int ij = 0;
923  for (int i = 0; i < numDOFsPerEqn; i++) {
924 
925  // solution at time tn for the ith DOF
926  const double hi = h_dof_old[i];
927  const double one_over_hi =
928  2. * hi / (hi * hi + std::pow(fmax(hi, hEps), 2));
929  const double hui = hu_dof_old[i];
930  const double hvi = hv_dof_old[i];
931  const double ui = hui * one_over_hi;
932  const double vi = hvi * one_over_hi;
933  const double hetai = heta_dof_old[i];
934  const double mi = lumped_mass_matrix[i];
935  const double inv_meshSizei = inverse_mesh[i];
936 
937  // Initialize diagonal entry
938  double dLowii = 0.;
939 
940  for (int offset = csrRowIndeces_DofLoops[i];
941  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
942 
943  // loop in j (sparsity pattern)
944  const int j = csrColumnOffsets_DofLoops[offset];
945 
946  // solution at time tn for the jth DOF
947  const double hj = h_dof_old[j];
948  const double one_over_hj =
949  2. * hj / (hj * hj + std::pow(fmax(hj, hEps), 2));
950  const double huj = hu_dof_old[j];
951  const double hvj = hv_dof_old[j];
952  const double uj = huj * one_over_hj;
953  const double vj = hvj * one_over_hj;
954  const double hetaj = heta_dof_old[j];
955  const double mj = lumped_mass_matrix[j];
956  const double inv_meshSizej = inverse_mesh[j];
957 
958  if (j != i) {
960  // DISSIPATIVE MATRIX //
962  const double cij_norm = sqrt(Cx[ij] * Cx[ij] + Cy[ij] * Cy[ij]);
963  const double cji_norm = sqrt(CTx[ij] * CTx[ij] + CTy[ij] * CTy[ij]);
964  const double nxij = Cx[ij] / cij_norm, nyij = Cy[ij] / cij_norm;
965  const double nxji = CTx[ij] / cji_norm, nyji = CTy[ij] / cji_norm;
966 
967  const double muijL = fmax(std::abs(ui * Cx[ij] + vi * Cy[ij]),
968  std::abs(uj * CTx[ij] + vj * CTy[ij]));
969  dLowij = fmax(maxWaveSpeedSharpInitialGuess(
970  g, nxij, nyij, hi, hui, hvi, hetai, inv_meshSizei,
971  hj, huj, hvj, hetaj, inv_meshSizej, hEps) *
972  cij_norm,
973  maxWaveSpeedSharpInitialGuess(
974  g, nxji, nyji, hj, huj, hvj, hetaj, inv_meshSizej,
975  hi, hui, hvi, hetai, inv_meshSizei, hEps) *
976  cji_norm);
977 
978  // Take max of dij and muij
979  dLowij = fmax(dLowij, muijL);
980 
981  // Define diagonal entry
982  dLowii -= dLowij;
983  }
984 
985  // update ij
986  ij += 1;
987  }
989  // CALCULATE EDGE BASED CFL //
991  edge_based_cfl[i] = 1.0 * fabs(dLowii) / mi;
992  max_edge_based_cfl = fmax(max_edge_based_cfl, edge_based_cfl[i]);
993  }
994 
995  return max_edge_based_cfl;
996  } // End calculateEdgeBasedCFL
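/* calculateEdgeBasedCFL returns max_i |sum_{j != i} dLow_ij| / m_i. A
   hypothetical caller (the time stepping is presumably driven from the Python
   side, so the names below are illustrative only) would choose the step as

     const double max_cfl = model.calculateEdgeBasedCFL(args);
     const double dt = cfl_target / max_cfl;   // e.g. cfl_target = 0.25

   so that dt * edge_based_cfl[i] <= cfl_target for every DOF. */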
997 
998  void calculatePreStep(arguments_dict &args) {
999  const double g = args.scalar<double>("g");
1000  const xt::pyarray<double> &h_dof_old = args.array<double>("h_dof_old");
1001  const xt::pyarray<double> &hu_dof_old = args.array<double>("hu_dof_old");
1002  const xt::pyarray<double> &hv_dof_old = args.array<double>("hv_dof_old");
1003  const xt::pyarray<double> &heta_dof_old =
1004  args.array<double>("heta_dof_old");
1005  const double hEps = args.scalar<double>("hEps");
1006  const int numDOFsPerEqn = args.scalar<int>("numDOFsPerEqn");
1007  const xt::pyarray<int> &csrRowIndeces_DofLoops =
1008  args.array<int>("csrRowIndeces_DofLoops");
1009  const xt::pyarray<int> &csrColumnOffsets_DofLoops =
1010  args.array<int>("csrColumnOffsets_DofLoops");
1011  xt::pyarray<double> &entropy = args.array<double>("entropy");
1012  xt::pyarray<double> &delta_Sqd_h = args.array<double>("delta_Sqd_h");
1013  xt::pyarray<double> &delta_Sqd_heta = args.array<double>("delta_Sqd_heta");
1014  xt::pyarray<double> &thetaj_inv = args.array<double>("thetaj_inv");
1015  double &dij_small = args.scalar<double>("dij_small");
1016  const double h0_max = args.scalar<double>("h0_max");
1017  const xt::pyarray<double> &Cx = args.array<double>("Cx");
1018  const xt::pyarray<double> &Cy = args.array<double>("Cy");
1019 
1020  const double speed = std::sqrt(g * h0_max);
1021  dij_small = 0.;
1022 
1024  // ********** LOOP ON DOFs ********** //
1026  // To compute:
1027  // * Entropy at the i-th node
1028  // * thetaj_inv at the i-th node
1029  // * second-order differences at the i-th node
1030  // * dij_small
1031 
1032  int ij = 0;
1033  for (int i = 0; i < numDOFsPerEqn; i++) {
1034 
1035  // Define things at ith node
1036  const double hi = h_dof_old[i];
1037  const double hu_i = hu_dof_old[i];
1038  const double hv_i = hv_dof_old[i];
1039  const double hetai = heta_dof_old[i];
1040  const double one_over_hi =
1041  2. * hi / (hi * hi + std::pow(fmax(hi, hEps), 2)); // hEps
1042  const double u_i = hu_i * one_over_hi;
1043  const double v_i = hv_i * one_over_hi;
1044  const double kin_i = 0.5 * hi * (u_i * u_i + v_i * v_i);
1045 
1046  // Compute entropy (with a flat bottom)
1047  entropy[i] = ENTROPY(g, hi, hu_i, hv_i, 0., one_over_hi);
1048 
1049  // Define convex coefficients for each i
1050  thetaj_inv[i] =
1051  1. / (csrRowIndeces_DofLoops[i + 1] - csrRowIndeces_DofLoops[i] - 1);
1052 
1053  // Initialize sum of differences
1054  double h_diff = 0.;
1055  double heta_diff = 0.;
1056  double kin_diff = 0.;
1057 
1058  for (int offset = csrRowIndeces_DofLoops[i];
1059  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
1060  const int j = csrColumnOffsets_DofLoops[offset];
1061 
1062  // Define things at jth node
1063  const double hj = h_dof_old[j];
1064  const double hu_j = hu_dof_old[j];
1065  const double hv_j = hv_dof_old[j];
1066  const double hetaj = heta_dof_old[j];
1067  const double one_over_hj =
1068  2. * hj / (hj * hj + std::pow(fmax(hj, hEps), 2));
1069  const double u_j = hu_j * one_over_hj;
1070  const double v_j = hv_j * one_over_hj;
1071  const double kin_j = 0.5 * hj * (u_j * u_j + v_j * v_j);
1072 
1073  /* To compute:
1074  * Second-difference relaxation quantities
1075  */
1076  if (j != i) {
1077  h_diff += hi - hj;
1078  heta_diff += hetai - hetaj;
1079  kin_diff += kin_i - kin_j;
1080  }
1081 
1082  // Compute dij_small
1083  const double x = fabs(Cx[ij]) + fabs(Cy[ij]);
1084  dij_small = fmax(dij_small, x * speed);
1085 
1086  ij += 1;
1087  } // j loop ends here
1088 
1089  // Set differences
1090  delta_Sqd_h[i] = h_diff;
1091  delta_Sqd_heta[i] = heta_diff;
1092 
1093  } // i loop ends here
1094 
1095  // Define final dij_small
1096  dij_small = 1E-14 * dij_small;
1097 
1098  } // end calculatePreStep
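/* On exit from calculatePreStep (a summary of the loops above):

     dij_small      = 1e-14 * max_ij ( |Cx[ij]| + |Cy[ij]| ) * sqrt(g * h0_max)
     thetaj_inv[i]  = 1 / (number of neighbors of i, excluding i itself)
     delta_Sqd_h[i] = sum_{j != i} (h_i - h_j)   (and likewise delta_Sqd_heta)

   entropy[i] holds the flat-bottom entropy at each node; these quantities are
   reused by calculateEV, calculateResidual and convexLimiting. */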
1099 
1100  void calculateEV(arguments_dict &args) {
1101  const double g = args.scalar<double>("g");
1102  const xt::pyarray<double> &h_dof_old = args.array<double>("h_dof_old");
1103  const xt::pyarray<double> &hu_dof_old = args.array<double>("hu_dof_old");
1104  const xt::pyarray<double> &hv_dof_old = args.array<double>("hv_dof_old");
1105  const xt::pyarray<double> &Cx = args.array<double>("Cx");
1106  const xt::pyarray<double> &Cy = args.array<double>("Cy");
1107  const xt::pyarray<double> &CTx = args.array<double>("CTx");
1108  const xt::pyarray<double> &CTy = args.array<double>("CTy");
1109  const int numDOFsPerEqn = args.scalar<int>("numDOFsPerEqn");
1110  const xt::pyarray<int> &csrRowIndeces_DofLoops =
1111  args.array<int>("csrRowIndeces_DofLoops");
1112  const xt::pyarray<int> &csrColumnOffsets_DofLoops =
1113  args.array<int>("csrColumnOffsets_DofLoops");
1114  const xt::pyarray<double> &lumped_mass_matrix =
1115  args.array<double>("lumped_mass_matrix");
1116  const double hEps = args.scalar<double>("hEps");
1117  xt::pyarray<double> &global_entropy_residual =
1118  args.array<double>("global_entropy_residual");
1119  const xt::pyarray<double> &entropy = args.array<double>("entropy");
1120  const double h0_max = args.scalar<double>("h0_max");
1121 
1123  // ********** FIRST LOOP ON DOFs ********** //
1125  // To compute:
1126  // * global entropy residual
1127 
1128  int ij = 0;
1129  for (int i = 0; i < numDOFsPerEqn; i++) {
1130 
1131  // solution at time tn for the ith DOF
1132  const double hi = h_dof_old[i];
1133  const double hui = hu_dof_old[i];
1134  const double hvi = hv_dof_old[i];
1135  const double one_over_hiReg =
1136  2. * hi / (hi * hi + std::pow(fmax(hi, hEps), 2)); // hEps
1137  const double ui = hui * one_over_hiReg;
1138  const double vi = hvi * one_over_hiReg;
1139  const double mi = lumped_mass_matrix[i];
1140 
1141  // initialize some things for entropy residual
1142  double etaMax_i = fabs(entropy[i]);
1143  double etaMin_i = fabs(entropy[i]);
1144 
1145  double ith_flux_term1 = 0., ith_flux_term2 = 0., ith_flux_term3 = 0.;
1146  double entropy_flux = 0.;
1147  double sum_entprime_flux = 0.;
1148  const double eta_prime1 =
1149  DENTROPY_DH(g, hi, hui, hvi, 0., one_over_hiReg);
1150  const double eta_prime2 =
1151  DENTROPY_DHU(g, hi, hui, hvi, 0., one_over_hiReg);
1152  const double eta_prime3 =
1153  DENTROPY_DHV(g, hi, hui, hvi, 0., one_over_hiReg);
1154 
1155  // loop in j (sparsity pattern)
1156  for (int offset = csrRowIndeces_DofLoops[i];
1157  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
1158 
1159  int j = csrColumnOffsets_DofLoops[offset];
1160 
1161  // solution at time tn for the jth DOF
1162  const double hj = h_dof_old[j];
1163  const double huj = hu_dof_old[j];
1164  const double hvj = hv_dof_old[j];
1165  const double one_over_hjReg =
1166  2. * hj / (hj * hj + std::pow(fmax(hj, hEps), 2));
1167  const double uj = huj * one_over_hjReg;
1168  const double vj = hvj * one_over_hjReg;
1169 
1170  // auxiliary functions to compute fluxes
1171  const double aux_h =
1172  (uj * hj - ui * hi) * Cx[ij] + (vj * hj - vi * hi) * Cy[ij];
1173  const double aux_hu =
1174  (uj * huj - ui * hui) * Cx[ij] + (vj * huj - vi * hui) * Cy[ij];
1175  const double aux_hv =
1176  (uj * hvj - ui * hvi) * Cx[ij] + (vj * hvj - vi * hvi) * Cy[ij];
1177 
1178  // get regular flux
1179  ith_flux_term1 += aux_h;
1180  ith_flux_term2 += aux_hu + 0.5 * g * hj * hj * Cx[ij];
1181  ith_flux_term3 += aux_hv + 0.5 * g * hj * hj * Cy[ij];
1182 
1183  // define entropy flux with flat topography
1184  entropy_flux +=
1185  (Cx[ij] * ENTROPY_FLUX1(g, hj, huj, hvj, 0., one_over_hjReg) +
1186  Cy[ij] * ENTROPY_FLUX2(g, hj, huj, hvj, 0., one_over_hjReg));
1187 
1188  // compute eta_max and eta_min
1189  etaMax_i = fmax(etaMax_i, fabs(entropy[j]));
1190  etaMin_i = fmin(etaMin_i, fabs(entropy[j]));
1191 
1192  // update ij
1193  ij += 1;
1194  } // end j loop
1195 
1196  // define sum of entprime*flux
1197  sum_entprime_flux =
1198  (ith_flux_term1 * eta_prime1 + ith_flux_term2 * eta_prime2 +
1199  ith_flux_term3 * eta_prime3);
1200 
1201  // define rescale for normalization
1202  const double small_rescale = g * hEps * h0_max;
1203  const double rescale =
1204  fmax(fabs(etaMax_i - etaMin_i) / 2., small_rescale);
1205 
1206  // compute entropy residual
1207  const double one_over_entNormFactori = 1. / rescale;
1208  global_entropy_residual[i] =
1209  one_over_entNormFactori * fabs(entropy_flux - sum_entprime_flux);
1210 
1211  // if "dry" state, set residual to 1
1212  if (hi <= hEps) {
1213  global_entropy_residual[i] = 1.;
1214  }
1215  } // end i loop
1216 
1217  // ********** END OF LOOP IN DOFs ********** //
1218  } // end calculateEV
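/* The normalized entropy residual computed above is, per DOF i,

     R_i = | entropy_flux_i - eta'(U_i) . flux_i |
           / max( |etaMax_i - etaMin_i| / 2, g * hEps * h0_max ),

   where entropy_flux_i = sum_j ( Cx[ij]*ENTROPY_FLUX1(U_j) + Cy[ij]*ENTROPY_FLUX2(U_j) ),
   flux_i collects the discrete flat-bottom shallow water fluxes, and
   eta' = (DENTROPY_DH, DENTROPY_DHU, DENTROPY_DHV) is evaluated at U_i.
   R_i is set to 1 at "dry" nodes (h_i <= hEps) and is used in
   calculateResidual and convexLimiting to cap the high-order graph viscosity
   (dH_ij = min(dL_ij, max(R_i, R_j))). */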
1219 
1220  void calculateResidual(arguments_dict &args) {
1221  const double g = args.scalar<double>("g");
1222  const xt::pyarray<double> &h_dof_old = args.array<double>("h_dof_old");
1223  const xt::pyarray<double> &hu_dof_old = args.array<double>("hu_dof_old");
1224  const xt::pyarray<double> &hv_dof_old = args.array<double>("hv_dof_old");
1225  const xt::pyarray<double> &heta_dof_old =
1226  args.array<double>("heta_dof_old");
1227  const xt::pyarray<double> &hw_dof_old = args.array<double>("hw_dof_old");
1228  const xt::pyarray<double> &hbeta_dof_old =
1229  args.array<double>("hbeta_dof_old");
1230  const xt::pyarray<double> &b_dof = args.array<double>("b_dof");
1231  const xt::pyarray<double> &Cx = args.array<double>("Cx");
1232  const xt::pyarray<double> &Cy = args.array<double>("Cy");
1233  const xt::pyarray<double> &CTx = args.array<double>("CTx");
1234  const xt::pyarray<double> &CTy = args.array<double>("CTy");
1235  const int numDOFsPerEqn = args.scalar<int>("numDOFsPerEqn");
1236  const xt::pyarray<int> &csrRowIndeces_DofLoops =
1237  args.array<int>("csrRowIndeces_DofLoops");
1238  const xt::pyarray<int> &csrColumnOffsets_DofLoops =
1239  args.array<int>("csrColumnOffsets_DofLoops");
1240  const xt::pyarray<double> &lumped_mass_matrix =
1241  args.array<double>("lumped_mass_matrix");
1242  const double hEps = args.scalar<double>("hEps");
1243  xt::pyarray<double> &SourceTerm_h = args.array<double>("SourceTerm_h");
1244  xt::pyarray<double> &SourceTerm_hu = args.array<double>("SourceTerm_hu");
1245  xt::pyarray<double> &SourceTerm_hv = args.array<double>("SourceTerm_hv");
1246  xt::pyarray<double> &SourceTerm_heta =
1247  args.array<double>("SourceTerm_heta");
1248  xt::pyarray<double> &SourceTerm_hw = args.array<double>("SourceTerm_hw");
1249  xt::pyarray<double> &SourceTerm_hbeta =
1250  args.array<double>("SourceTerm_hbeta");
1251  const double dt = args.scalar<double>("dt");
1252  const double mannings = args.scalar<double>("mannings");
1253  const int lstage = args.scalar<int>("lstage");
1254  xt::pyarray<double> &global_entropy_residual =
1255  args.array<double>("global_entropy_residual");
1256  const double dij_small = args.scalar<double>("dij_small");
1257  xt::pyarray<double> &hLow = args.array<double>("hLow");
1258  xt::pyarray<double> &huLow = args.array<double>("huLow");
1259  xt::pyarray<double> &hvLow = args.array<double>("hvLow");
1260  xt::pyarray<double> &hetaLow = args.array<double>("hetaLow");
1261  xt::pyarray<double> &hwLow = args.array<double>("hwLow");
1262  xt::pyarray<double> &hbetaLow = args.array<double>("hbetaLow");
1263  xt::pyarray<double> &h_min = args.array<double>("h_min");
1264  xt::pyarray<double> &h_max = args.array<double>("h_max");
1265  xt::pyarray<double> &heta_min = args.array<double>("heta_min");
1266  xt::pyarray<double> &heta_max = args.array<double>("heta_max");
1267  xt::pyarray<double> &kin_max = args.array<double>("kin_max");
1268  const xt::pyarray<double> &x_values = args.array<double>("x_values");
1269  const double x_min = args.scalar<double>("x_min");
1270  const double x_max = args.scalar<double>("x_max");
1271  const xt::pyarray<double> &inverse_mesh =
1272  args.array<double>("inverse_mesh");
1273  const double h0_max = args.scalar<double>("h0_max");
1274  xt::pyarray<double> &RHS_high_h = args.array<double>("RHS_high_h");
1275  xt::pyarray<double> &RHS_high_hu = args.array<double>("RHS_high_hu");
1276  xt::pyarray<double> &RHS_high_hv = args.array<double>("RHS_high_hv");
1277  xt::pyarray<double> &RHS_high_heta = args.array<double>("RHS_high_heta");
1278  xt::pyarray<double> &RHS_high_hw = args.array<double>("RHS_high_hw");
1279  xt::pyarray<double> &RHS_high_hbeta = args.array<double>("RHS_high_hbeta");
1280  xt::pyarray<double> &extendedSourceTerm_hu =
1281  args.array<double>("extendedSourceTerm_hu");
1282  xt::pyarray<double> &extendedSourceTerm_hv =
1283  args.array<double>("extendedSourceTerm_hv");
1284  double size_of_domain = args.scalar<double>("size_of_domain");
1285  xt::pyarray<double> &delta_Sqd_h = args.array<double>("delta_Sqd_h");
1286  xt::pyarray<double> &delta_Sqd_heta = args.array<double>("delta_Sqd_heta");
1287  const double gen_length = args.scalar<double>("gen_length");
1288  const double gen_start = args.scalar<double>("gen_start");
1289  const double abs_length = args.scalar<double>("abs_length");
1290  const double abs_start = args.scalar<double>("abs_start");
1291  // xt::pyarray<int> &relaxation_zone_nodes =
1292  // args.array<int>("relaxation_zone_nodes");
1293  xt::pyarray<double> &h_wave = args.array<double>("h_wave");
1294  xt::pyarray<double> &h_u_wave = args.array<double>("h_u_wave");
1295  xt::pyarray<double> &h_v_wave = args.array<double>("h_v_wave");
1296  xt::pyarray<double> &h_eta_wave = args.array<double>("h_eta_wave");
1297  xt::pyarray<double> &h_w_wave = args.array<double>("h_w_wave");
1298  xt::pyarray<double> &h_beta_wave = args.array<double>("h_beta_wave");
1299 
1300  /* Define constants for sources here. Note that these do not depend on the
1301  * DOFs so we should only compute once */
1302  const double n2 = std::pow(mannings, 2.);
1303  const double gamma = 4. / 3;
1304  const double xi = 10.;
1305  const double alpha = 0.005;
1306  /* ---------------------------------- */
1307 
1309  // ********** FIRST SET OF LOOPS ON DOFs ********** //
1311  // To compute:
1312  // * Source terms
1313  // * Low order solution (in terms of bar states)
1314  // * Local bounds for limiting
1315  // * High-order right hand side
1316 
1317  /* ----------- Here we do some initial declarations --------------- */
1318  // Bar states
1319  std::valarray<double> hBT(0., Cx.size()), huBT(0., Cx.size()),
1320  hvBT(0., Cx.size()), hetaBT(0., Cx.size()), hwBT(0., Cx.size()),
1321  hbetaBT(0., Cx.size()), dLow(0., Cx.size());
1322 
1323  // Relaxation quantities
1324  std::valarray<double> bar_deltaSqd_h(0., numDOFsPerEqn),
1325  bar_deltaSqd_heta(0., numDOFsPerEqn);
1326 
1327  double high_viscosity_h, high_viscosity_hu, high_viscosity_hv,
1328  high_viscosity_heta, high_viscosity_hw, high_viscosity_hbeta;
1329 
1330  /* First loop to compute:
1331  1. bar states
1332  2. Sources
1333  3. high-order RHS
1334  */
1335  int ij = 0;
1336  for (int i = 0; i < numDOFsPerEqn; i++) {
1337 
1338  // define things at ith node
1339  const double hi = h_dof_old[i];
1340  const double hui = hu_dof_old[i];
1341  const double hvi = hv_dof_old[i];
1342  const double hetai = heta_dof_old[i];
1343  const double hwi = hw_dof_old[i];
1344  const double hbetai = hbeta_dof_old[i];
1345  const double Zi = b_dof[i];
1346  const double mi = lumped_mass_matrix[i];
1347  const double one_over_hiReg =
1348  2. * hi / (hi * hi + std::pow(fmax(hi, hEps), 2));
1349  const double ui = hui * one_over_hiReg;
1350  const double vi = hvi * one_over_hiReg;
1351  const double etai = hetai * one_over_hiReg;
1352  const double inv_meshSizei = inverse_mesh[i];
1353  const double x_i = x_values[i]; // x value at ith dof
1354 
1355  // Initialize Sources to 0
1356  SourceTerm_h[i] = 0.;
1357  SourceTerm_hu[i] = 0.;
1358  SourceTerm_hv[i] = 0.;
1359  SourceTerm_heta[i] = 0.;
1360  SourceTerm_hw[i] = 0.;
1361  SourceTerm_hbeta[i] = 0.;
1362 
1363  // Define pTilde at ith node here
1364  double pTildei =
1365  -(LAMBDA_MGN * g / 3. * inv_meshSizei) * 6. * hi * (hetai - hi * hi);
1366  if (IF_BOTH_GAMMA_BRANCHES) {
1367  double diff_over_h_i = (hetai - hi * hi) * one_over_hiReg;
1368  if (hetai > std::pow(hi, 2.0)) {
1369  pTildei = -(LAMBDA_MGN * g / 3.0 * inv_meshSizei) * 2.0 *
1370  diff_over_h_i * (etai * etai + etai * hi + hi * hi);
1371  }
1372  }
1373 
1374  // Define full pressure
1375  const double pressure_i = 0.5 * g * hi * hi + pTildei;
1376 
1377  // Extended topography source for the bar-state version of the low-order update
1378  extendedSourceTerm_hu[i] = 0.;
1379  extendedSourceTerm_hv[i] = 0.;
1380 
1381  /* ---- For nodal sources, part 1 (assuming source is on RHS) ---------*/
1382 
1383  // Friction terms
1384  const double veli_norm = std::sqrt(ui * ui + vi * vi);
1385  const double hi_to_the_gamma = std::pow(fmax(hi, hEps), gamma);
1386  const double friction_aux =
1387  veli_norm == 0.
1388  ? 0.
1389  : (2 * g * n2 * veli_norm * mi /
1390  (hi_to_the_gamma +
1391  fmax(hi_to_the_gamma, xi * g * n2 * dt * veli_norm)));
1392  SourceTerm_hu[i] = -friction_aux * hui;
1393  SourceTerm_hv[i] = -friction_aux * hvi;
1394 
1395  // Set flux sum terms to 0.
1396  double sum_flux_h = 0.;
1397  double sum_flux_hu = 0.;
1398  double sum_flux_hv = 0.;
1399  double sum_flux_heta = 0.;
1400  double sum_flux_hw = 0.;
1401  double sum_flux_hbeta = 0.;
1402 
1403  // Initialize sum for high-order viscosity terms
1404  high_viscosity_h = 0.;
1405  high_viscosity_hu = 0.;
1406  high_viscosity_hv = 0.;
1407  high_viscosity_heta = 0.;
1408  high_viscosity_hw = 0.;
1409  high_viscosity_hbeta = 0.;
1410 
1411  // Set some values to 0 for ith node
1412  double grad_Z_x_i = 0.;
1413  double grad_Z_y_i = 0.;
1414 
1415  const double card_inv =
1416  1. / (csrRowIndeces_DofLoops[i + 1] - csrRowIndeces_DofLoops[i]);
1417 
1418  // loop over the sparsity pattern of the i-th DOF
1419  for (int offset = csrRowIndeces_DofLoops[i];
1420  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
1421  const int j = csrColumnOffsets_DofLoops[offset];
1422 
1423  // define things at jth node
1424  const double hj = h_dof_old[j];
1425  const double huj = hu_dof_old[j];
1426  const double hvj = hv_dof_old[j];
1427  const double hetaj = heta_dof_old[j];
1428  const double hwj = hw_dof_old[j];
1429  const double hbetaj = hbeta_dof_old[j];
1430  const double Zj = b_dof[j];
1431  const double one_over_hjReg =
1432  2. * hj / (hj * hj + std::pow(fmax(hj, hEps), 2));
1433  const double uj = huj * one_over_hjReg;
1434  const double vj = hvj * one_over_hjReg;
1435  const double etaj = hetaj * one_over_hjReg;
1436  const double mj = lumped_mass_matrix[j];
1437  const double inv_meshSizej = inverse_mesh[j]; // local mesh size in 2d
1438 
1439  // Define grad_Z_x and grad_Z_y
1440  grad_Z_x_i += Zj * Cx[ij];
1441  grad_Z_y_i += Zj * Cy[ij];
1442 
1443  // Define pTilde at jth node
1444  double pTildej = -(LAMBDA_MGN * g / 3. * inv_meshSizej) * 6. * hj *
1445  (hetaj - hj * hj);
1446  if (IF_BOTH_GAMMA_BRANCHES) {
1447  double diff_over_h_j = (hetaj - hj * hj) * one_over_hjReg;
1448  if (hetaj > std::pow(hj, 2.0)) {
1449  pTildej = -(LAMBDA_MGN * g / 3.0 * inv_meshSizej) * 2.0 *
1450  diff_over_h_j * (etaj * etaj + etaj * hj + hj * hj);
1451  }
1452  }
1453 
1454  // Define full pressure at jth node
1455  const double pressure_j = 0.5 * g * hj * hj + pTildej;
1456 
1457  // Define extended source term for hu and hv
1458  extendedSourceTerm_hu[i] +=
1459  g * (-hi * Zj + 0.5 * (hj - hi) * (hj - hi)) * Cx[ij];
1460  extendedSourceTerm_hv[i] +=
1461  g * (-hi * Zj + 0.5 * (hj - hi) * (hj - hi)) * Cy[ij];
1462 
1463  /* ------- Define hyperbolic fluxes (for bar states) ------------ */
1464  const double flux_h =
1465  (hj * uj - hi * ui) * Cx[ij] + (hj * vj - hi * vi) * Cy[ij];
1466  //
1467  const double aux_hu =
1468  (uj * huj - ui * hui) * Cx[ij] + (vj * huj - vi * hui) * Cy[ij];
1469  const double flux_hu = aux_hu + (pressure_j - pressure_i) * Cx[ij];
1470  //
1471  const double aux_hv =
1472  (uj * hvj - ui * hvi) * Cx[ij] + (vj * hvj - vi * hvi) * Cy[ij];
1473  const double flux_hv = aux_hv + (pressure_j - pressure_i) * Cy[ij];
1474  //
1475  const double flux_heta = (uj * hj * etaj - ui * hi * etai) * Cx[ij] +
1476  (vj * hj * etaj - vi * hi * etai) * Cy[ij];
1477  const double flux_hw =
1478  (uj * hwj - ui * hwi) * Cx[ij] + (vj * hwj - vi * hwi) * Cy[ij];
1479  //
1480  const double flux_hbeta = (uj * hbetaj - ui * hbetai) * Cx[ij] +
1481  (vj * hbetaj - vi * hbetai) * Cy[ij];
1482  /* --------------------------------------------------------- */
1483 
1484  /* ------ Sum flux terms with well-balancing term on hu,hv ---------- */
1485  sum_flux_h += flux_h;
1486  sum_flux_hu += aux_hu + (g * hi * (hj + Zj) + pTildej) * Cx[ij];
1487  sum_flux_hv += aux_hv + (g * hi * (hj + Zj) + pTildej) * Cy[ij];
1488  sum_flux_heta += flux_heta;
1489  sum_flux_hw += flux_hw;
1490  sum_flux_hbeta += flux_hbeta;
1491  /* -------------------------------------------- */
1492 
1493  // Initialize low and high graph-viscosity coefficients to 0
1494  double muLij = 0., muHij = 0.;
1495  double dLowij = 0., dLij = 0., dHij = 0.;
1496 
1497  if (j != i) {
1498  // Put these computations first before it gets messy
1499  bar_deltaSqd_h[i] += 0.5 * delta_Sqd_h[j] + 0.5 * delta_Sqd_h[i];
1500  bar_deltaSqd_heta[i] +=
1501  0.5 * delta_Sqd_heta[j] + 0.5 * delta_Sqd_heta[i];
1502 
1503  /* ------------ Define viscosity ------------ */
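// dLowij: estimate of the maximal local wave speed (via
// maxWaveSpeedSharpInitialGuess) scaled by |c_ij|, taken over both
// orientations (i->j and j->i). muLij bounds the purely advective speed.
// dLij = max(dLowij, muLij) is the low-order viscosity; dHij = min(dLij, dEVij)
// and muHij = min(muLij, dEVij) are the entropy-limited high-order ones.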
1504  const double cij_norm = sqrt(Cx[ij] * Cx[ij] + Cy[ij] * Cy[ij]);
1505  const double cji_norm = sqrt(CTx[ij] * CTx[ij] + CTy[ij] * CTy[ij]);
1506  const double nxij = Cx[ij] / cij_norm;
1507  const double nyij = Cy[ij] / cij_norm;
1508  const double nxji = CTx[ij] / cji_norm;
1509  const double nyji = CTy[ij] / cji_norm;
1510  dLowij = fmax(maxWaveSpeedSharpInitialGuess(
1511  g, nxij, nyij, hi, hui, hvi, hetai, inv_meshSizei,
1512  hj, huj, hvj, hetaj, inv_meshSizej, hEps) *
1513  cij_norm,
1514  maxWaveSpeedSharpInitialGuess(
1515  g, nxji, nyji, hj, huj, hvj, hetaj, inv_meshSizej,
1516  hi, hui, hvi, hetai, inv_meshSizei, hEps) *
1517  cji_norm);
1518 
1519  // Low-order graph viscosity coefficients
1520  muLij = fmax(std::abs(ui * Cx[ij] + vi * Cy[ij]),
1521  std::abs(uj * CTx[ij] + vj * CTy[ij]));
1522  dLij = fmax(dLowij, muLij);
1523 
1524  // Saved for the computation of bounds below (do not pass to other routines)
1525  dLow[ij] = dLij;
1526 
1527  // High-order graph viscosity coefficients
1528  const double dEVij =
1529  fmax(global_entropy_residual[i], global_entropy_residual[j]);
1530  dHij = fmin(dLij, dEVij);
1531  muHij = fmin(muLij, dEVij);
1532  /* --------------------------------------------- */
1533 
1534  /* ---------- Compute star states ------------ */
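// Hydrostatic-reconstruction-type states: the depth is measured with respect
// to max(Z_i, Z_j) and clipped at 0, i.e.
//   hStar_ij = max(0, h_i + Z_i - max(Z_i, Z_j)),
// and the other conserved variables are rescaled by hStar/h (heta by its
// square), so the reconstructed velocities are unchanged.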
1535  const double hStarij = fmax(0., hi + Zi - fmax(Zi, Zj));
1536  const double hStarji = fmax(0., hj + Zj - fmax(Zi, Zj));
1537  const double hStar_ratio_i = hStarij * one_over_hiReg;
1538  const double hStar_ratio_j = hStarji * one_over_hjReg;
1539  //
1540  const double huStarij = hui * hStar_ratio_i;
1541  const double hvStarij = hvi * hStar_ratio_i;
1542  const double hetaStarij = hetai * std::pow(hStar_ratio_i, 2);
1543  const double hwStarij = hwi * hStar_ratio_i;
1544  const double hbetaStarij = hbetai * hStar_ratio_i;
1545  //
1546  const double huStarji = huj * hStar_ratio_j;
1547  const double hvStarji = hvj * hStar_ratio_j;
1548  const double hetaStarji = hetaj * std::pow(hStar_ratio_j, 2);
1549  const double hwStarji = hwj * hStar_ratio_j;
1550  const double hbetaStarji = hbetaj * hStar_ratio_j;
1551  /* ------------------------------------------- */
1552 
1553  /* --------- Bar states for i not equal j --------- */
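// With d_ij = max(dLij, dij_small) and c_ij = (Cx[ij], Cy[ij]):
//   Ubar_ij   = (U_i + U_j)/2 - (F(U_j) - F(U_i)) . c_ij / (2 d_ij),
//   Utilde_ij = (dLij - muLij)/(2 d_ij) * ((UStar_ji - U_j) - (UStar_ij - U_i)),
// computed component by component below; hBT etc. store Ubar_ij + Utilde_ij.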
1554  double hBar_ij = 0., hTilde_ij = 0., huBar_ij = 0., huTilde_ij = 0.,
1555  hvBar_ij = 0., hvTilde_ij = 0., hetaBar_ij = 0.,
1556  hetaTilde_ij = 0., hwBar_ij = 0., hwTilde_ij = 0.,
1557  hbetaBar_ij = 0., hbetaTilde_ij = 0.;
1558 
1559  const double half_dij_inv = -0.5 / fmax(dLij, dij_small);
1560  const double visc_ratio = -half_dij_inv * (dLij - muLij);
1561 
1562  // h component
1563  hBar_ij = half_dij_inv * flux_h + 0.5 * (hj + hi);
1564  hTilde_ij = visc_ratio * (hStarji - hj - (hStarij - hi));
1565 
1566  // hu component
1567  huBar_ij = half_dij_inv * flux_hu + 0.5 * (huj + hui);
1568  huTilde_ij = visc_ratio * (huStarji - huj - (huStarij - hui));
1569 
1570  // hv component
1571  hvBar_ij = half_dij_inv * flux_hv + 0.5 * (hvj + hvi);
1572  hvTilde_ij = visc_ratio * (hvStarji - hvj - (hvStarij - hvi));
1573 
1574  // heta component
1575  hetaBar_ij = half_dij_inv * flux_heta + 0.5 * (hetaj + hetai);
1576  hetaTilde_ij =
1577  visc_ratio * (hetaStarji - hetaj - (hetaStarij - hetai));
1578 
1579  // hw component
1580  hwBar_ij = half_dij_inv * flux_hw + 0.5 * (hwj + hwi);
1581  hwTilde_ij = visc_ratio * (hwStarji - hwj - (hwStarij - hwi));
1582 
1583  // hbeta component
1584  hbetaBar_ij = half_dij_inv * flux_hbeta + 0.5 * (hbetaj + hbetai);
1585  hbetaTilde_ij =
1586  visc_ratio * (hbetaStarji - hbetaj - (hbetaStarij - hbetai));
1587 
1588  /* Define bar states here. Note we take the max with 0 for hBT due to
1589  potential round-off error. */
1590  hBT[ij] = std::max(hBar_ij + hTilde_ij, 0.);
1591  huBT[ij] = huBar_ij + huTilde_ij;
1592  hvBT[ij] = hvBar_ij + hvTilde_ij;
1593  hetaBT[ij] = hetaBar_ij + hetaTilde_ij;
1594  hwBT[ij] = hwBar_ij + hwTilde_ij;
1595  hbetaBT[ij] = hbetaBar_ij + hbetaTilde_ij;
1596  /* ------------------------------------------- */
1597 
1598  /* --------- Sum high viscosity terms ------- */
1599  high_viscosity_h +=
1600  (dHij - muHij) * (hStarji - hStarij) + muHij * (hj - hi);
1601 
1602  high_viscosity_hu +=
1603  (dHij - muHij) * (huStarji - huStarij) + muHij * (huj - hui);
1604 
1605  high_viscosity_hv +=
1606  (dHij - muHij) * (hvStarji - hvStarij) + muHij * (hvj - hvi);
1607 
1608  high_viscosity_heta += (dHij - muHij) * (hetaStarji - hetaStarij) +
1609  muHij * (hetaj - hetai);
1610 
1611  high_viscosity_hw +=
1612  (dHij - muHij) * (hwStarji - hwStarij) + muHij * (hwj - hwi);
1613 
1614  high_viscosity_hbeta += (dHij - muHij) * (hbetaStarji - hbetaStarij) +
1615  muHij * (hbetaj - hbetai);
1616  /* ------------------------------------------- */
1617 
1618  } else {
1619  // Bar states by definition satisfy Utilde_ii + Ubar_ii = U_i
1620  hBT[ij] = hi;
1621  huBT[ij] = hui;
1622  hvBT[ij] = hvi;
1623  hetaBT[ij] = hetai;
1624  hwBT[ij] = hwi;
1625  hbetaBT[ij] = hbetai;
1626  }
1627 
1628  // UPDATE ij //
1629  ij += 1;
1630  } // j loop ends here
1631 
1632  // scale bar_deltaSqd_h, bar_deltaSqd_heta by 1/(2 * stencil cardinality)
1633  bar_deltaSqd_h[i] *= card_inv * 0.5;
1634  bar_deltaSqd_heta[i] *= card_inv * 0.5;
1635 
1636  /* ---- For nodal sources, part 2 (assuming source is on RHS) ---------*/
1637  // PDE source terms
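// Relaxation sources of the dispersive (modified Green-Naghdi) system:
//   R1 = hw - 1.5 q.grad(Z)/m_i                       (drives heta),
//   R2 = LAMBDA_MGN g/meshSize * hSqd_GammaPi,  with hSqd_GammaPi = 6 (heta - h^2)
//        in the default branch (relaxes heta toward h^2 through the hw equation),
//   R3 = LAMBDA_MGN sqrt(g h0_max)/meshSize * (q.grad(Z)/m_i - hbeta).
// They feed heta, hw, hbeta below and add (0.5 R2 - 0.25 R3) grad(Z) to hu, hv.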
1638  double hSqd_GammaPi = 6.0 * (hetai - hi * hi);
1639  if (IF_BOTH_GAMMA_BRANCHES) {
1640  const double diff_over_h_i = (hetai - hi * hi) * one_over_hiReg;
1641  if (hetai > std::pow(hi, 2.0)) {
1642  hSqd_GammaPi = 6.0 * etai * diff_over_h_i;
1643  }
1644  }
1645 
1646  const double q_dot_gradZ = hui * grad_Z_x_i + hvi * grad_Z_y_i;
1647  const double R1 = hwi - 1.5 * q_dot_gradZ / mi;
1648  const double R2 = LAMBDA_MGN * g * inv_meshSizei * hSqd_GammaPi;
1649  const double R3 = LAMBDA_MGN * std::sqrt(g * h0_max) * inv_meshSizei *
1650  (q_dot_gradZ / mi - hbetai);
1651 
1652  SourceTerm_h[i] += 0.;
1653  SourceTerm_hu[i] += (0.5 * R2 - 0.25 * R3) * grad_Z_x_i;
1654  SourceTerm_hv[i] += (0.5 * R2 - 0.25 * R3) * grad_Z_y_i;
1655  SourceTerm_heta[i] += mi * R1;
1656  SourceTerm_hw[i] += -mi * R2;
1657  SourceTerm_hbeta[i] += mi * R3;
1658 
1659  /* Generation/absorption sources */
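// Relaxation (sponge) zones: xhat maps the node position onto [0, 1] across the
// zone and relaxation(xhat, alpha) gives a smooth weight. In the generation zone
// the solution is nudged toward the prescribed wave (h_wave, h_u_wave, ...); in
// the absorption zone it is nudged toward rest. The rate scales with
// sqrt(g h0_max)/meshSize.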
1660  if (gen_length > 0.) {
1661  const double shift = -(gen_start - gen_length);
1662  const double xhat = (x_i + shift) / gen_length;
1663  const double function_gen = relaxation(xhat, alpha);
1664 
1665  SourceTerm_h[i] += -mi * std::sqrt(g * h0_max) * inv_meshSizei *
1666  function_gen * (hi - h_wave[i]);
1667  SourceTerm_hu[i] += -mi * std::sqrt(g * h0_max) * inv_meshSizei *
1668  function_gen * (hui - h_u_wave[i]);
1669  SourceTerm_hv[i] += -mi * std::sqrt(g * h0_max) * inv_meshSizei *
1670  function_gen * (hvi - h_v_wave[i]);
1671  SourceTerm_heta[i] += -mi * std::sqrt(g * h0_max) * inv_meshSizei *
1672  function_gen * (hetai - h_eta_wave[i]);
1673  SourceTerm_hw[i] += -mi * std::sqrt(g * h0_max) * inv_meshSizei *
1674  function_gen * (hwi - h_w_wave[i]);
1675  SourceTerm_hbeta[i] += -mi * std::sqrt(g * h0_max) * inv_meshSizei *
1676  function_gen * (hbetai - 0.);
1677  // for hu and hv
1678  extendedSourceTerm_hu[i] += -mi * std::sqrt(g * h0_max) *
1679  inv_meshSizei * function_gen *
1680  (hui - h_u_wave[i]);
1681  extendedSourceTerm_hv[i] += -mi * std::sqrt(g * h0_max) *
1682  inv_meshSizei * function_gen *
1683  (hvi - h_v_wave[i]);
1684  }
1685 
1686  if (abs_length > 0.) {
1687  const double shift = (abs_start + abs_length);
1688  const double xhat = (shift - x_i) / abs_length;
1689  const double function_gen = relaxation(xhat, alpha);
1690 
1691  SourceTerm_hu[i] += -mi * std::sqrt(g * h0_max) * inv_meshSizei *
1692  function_gen * (hui - 0.);
1693  SourceTerm_hv[i] += -mi * std::sqrt(g * h0_max) * inv_meshSizei *
1694  function_gen * (hvi - 0.);
1695  SourceTerm_hw[i] += -mi * std::sqrt(g * h0_max) * inv_meshSizei *
1696  function_gen * (hwi - 0.);
1697  SourceTerm_hbeta[i] += -mi * std::sqrt(g * h0_max) * inv_meshSizei *
1698  function_gen * (hbetai - 0.);
1699  // for hu and hv
1700  extendedSourceTerm_hu[i] += -mi * std::sqrt(g * h0_max) *
1701  inv_meshSizei * function_gen * (hui - 0.);
1702  extendedSourceTerm_hv[i] += -mi * std::sqrt(g * h0_max) *
1703  inv_meshSizei * function_gen * (hvi - 0.);
1704  }
1705  /* ----------------------------------------------------- */
1706 
1707  /* ----------- Save high order RHS -------------------- */
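// Spatial residual of the high-order (entropy-viscosity) discretization:
// RHS_high = Source - sum of fluxes + high-order graph viscosity, stored per
// DOF and consumed later by the high-order update in calculateResidual.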
1708  RHS_high_h[i] = SourceTerm_h[i] - sum_flux_h + high_viscosity_h;
1709  RHS_high_hu[i] = SourceTerm_hu[i] - sum_flux_hu + high_viscosity_hu;
1710  RHS_high_hv[i] = SourceTerm_hv[i] - sum_flux_hv + high_viscosity_hv;
1711  RHS_high_heta[i] =
1712  SourceTerm_heta[i] - sum_flux_heta + high_viscosity_heta;
1713  RHS_high_hw[i] = SourceTerm_hw[i] - sum_flux_hw + high_viscosity_hw;
1714  RHS_high_hbeta[i] =
1715  SourceTerm_hbeta[i] - sum_flux_hbeta + high_viscosity_hbeta;
1716  /* ----------------------------------------------------*/
1717 
1718  } // i loop ends here
1719 
1720  /* Then final loop to get low order solution and local bounds */
1721  ij = 0;
1722  for (int i = 0; i < numDOFsPerEqn; i++) {
1723 
1724  /* Define things at ith node */
1725  const double hi = h_dof_old[i];
1726  const double hu_i = hu_dof_old[i];
1727  const double hv_i = hv_dof_old[i];
1728  const double hetai = heta_dof_old[i];
1729  const double hwi = hw_dof_old[i];
1730  const double hbetai = hbeta_dof_old[i];
1731  const double one_over_hi =
1732  2. * hi / (hi * hi + std::pow(fmax(hi, hEps), 2)); // regularized with hEps
1733  const double u_i = hu_i * one_over_hi;
1734  const double v_i = hv_i * one_over_hi;
1735  const double kin_i = 0.5 * hi * (u_i * u_i + v_i * v_i);
1736  const double mi = lumped_mass_matrix[i];
1737 
1738  /* Initialize bounds */
1739  h_min[i] = hi;
1740  h_max[i] = hi;
1741  heta_min[i] = hetai;
1742  heta_max[i] = hetai;
1743  kin_max[i] = kin_i;
1744 
1745  // For convex low-order update
1746  double sum_dij = 0.;
1747  double sum_dij_hbar = 0.;
1748  double sum_dij_hubar = 0.;
1749  double sum_dij_hvbar = 0.;
1750  double sum_dij_hetabar = 0.;
1751  double sum_dij_hwbar = 0.;
1752  double sum_dij_hbetabar = 0.;
1753 
1754  // loop in j (sparsity pattern)
1755  for (int offset = csrRowIndeces_DofLoops[i];
1756  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
1757  const int j = csrColumnOffsets_DofLoops[offset];
1758 
1759  const double one_over_hBT =
1760  2. * hBT[ij] /
1761  (hBT[ij] * hBT[ij] + std::pow(fmax(hBT[ij], hEps), 2));
1762  const double psi_ij =
1763  0.5 * one_over_hBT * (huBT[ij] * huBT[ij] + hvBT[ij] * hvBT[ij]);
1764 
1765  // compute local bounds
1766  h_min[i] = std::min(h_min[i], hBT[ij]);
1767  h_max[i] = std::max(h_max[i], hBT[ij]);
1768  heta_min[i] = std::min(heta_min[i], hetaBT[ij]);
1769  heta_max[i] = std::max(heta_max[i], hetaBT[ij]);
1770  kin_max[i] = fmax(psi_ij, kin_max[i]);
1771 
1772  if (j != i) {
1773  sum_dij += dLow[ij];
1774  sum_dij_hbar += dLow[ij] * hBT[ij];
1775  sum_dij_hubar += dLow[ij] * huBT[ij];
1776  sum_dij_hvbar += dLow[ij] * hvBT[ij];
1777  sum_dij_hetabar += dLow[ij] * hetaBT[ij];
1778  sum_dij_hwbar += dLow[ij] * hwBT[ij];
1779  sum_dij_hbetabar += dLow[ij] * hbetaBT[ij];
1780  }
1781 
1782  // update ij
1783  ij += 1;
1784  } // j loop ends here
1785 
1786  const double cfl_condition = (1. - dt / mi * 2. * sum_dij);
1787 
1788  // Here we define low-order convex update (WITHOUT SOURCES)
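// Convex-combination form of the low-order update:
//   ULow_i = (1 - 2 dt/m_i sum_j d_ij) U_i + 2 dt/m_i sum_j d_ij UBT_ij,
// with UBT the bar-plus-tilde states stored above. The weights sum to 1, so
// when the CFL factor is nonnegative, hLow inherits hBT >= 0 and stays
// nonnegative.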
1789  hLow[i] = hi * cfl_condition + dt / mi * (2. * sum_dij_hbar);
1790  huLow[i] = hu_i * cfl_condition + dt / mi * (2. * sum_dij_hubar);
1791  hvLow[i] = hv_i * cfl_condition + dt / mi * (2. * sum_dij_hvbar);
1792  hetaLow[i] = hetai * cfl_condition + dt / mi * (2. * sum_dij_hetabar);
1793  hwLow[i] = hwi * cfl_condition + dt / mi * (2. * sum_dij_hwbar);
1794  hbetaLow[i] = hbetai * cfl_condition + dt / mi * (2. * sum_dij_hbetabar);
1795 
1796  // clean up hLow from round-off error
1797  if (dt < 1e-2 && hLow[i] < -hEps) {
1798  std::cout << "Low-order water depth is negative !!! " << hLow[i] << "\n"
1799  << "hLow[i] + hEps !!! " << hLow[i] + hEps
1800  << "\n"
1801  << "hEps !!! " << hEps << "\n"
1802  << " ... aborting!" << std::endl;
1803  abort();
1804  } else {
1805  if (hLow[i] <= hEps) {
1806  // Clean up low-order h
1807  hLow[i] = 0.;
1808  }
1809  }
1810 
1811 #if IF_DEBUGGING
1812  if (h_min[i] < 0.) {
1813  std::cout << " \n "
1814  << " Minimum water depth is negative !!! " << h_min[i]
1815  << "\n "
1816  << " hLow[i] !!! " << hLow[i] << "\n "
1817  << " ... aborting!"
1818  << "\n"
1819  << std::endl;
1820  } else {
1821  if (h_min[i] <= hEps) {
1822  // Clean up h_min
1823  h_min[i] = 0.;
1824  }
1825  }
1826 
1827  if (h_max[i] < 0.) {
1828  std::cout << " Maximum water depth is negative !!! " << h_max[i] << "\n"
1829  << " hLow[i] !!! " << hLow[i] << "\n "
1830  << " ... aborting!" << std::endl;
1831  abort();
1832  } else {
1833  if (h_max[i] <= hEps) {
1834  // Clean up h_max
1835  h_max[i] = 0.;
1836  }
1837  }
1838 #endif
1839 
1840  /* Relaxation of bounds */
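// The local bounds are widened before limiting: with s_i = (m_i/|Omega|)^(3/4),
// minima/maxima are relaxed by a relative factor (1 -/+ s_i), but by no more
// than the averaged second differences |bar_deltaSqd_*|; kin_max gets a
// (1 + sqrt(m_i/|Omega|)) factor. Presumably this gives the convex limiter some
// room while keeping the bounds local.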
1841  const double s_i = std::pow(std::sqrt(std::sqrt(mi / size_of_domain)), 3);
1842  const double urelax_i = 1. + s_i;
1843  const double drelax_i = 1. - s_i;
1844 
1845  kin_max[i] =
1846  std::max((1. + std::sqrt(mi / size_of_domain)) * kin_max[i], 0.);
1847  h_min[i] =
1848  std::max(drelax_i * h_min[i], h_min[i] - std::abs(bar_deltaSqd_h[i]));
1849  h_max[i] =
1850  std::min(urelax_i * h_max[i], h_max[i] + std::abs(bar_deltaSqd_h[i]));
1851  heta_min[i] = std::max(drelax_i * heta_min[i],
1852  heta_min[i] - std::abs(bar_deltaSqd_heta[i]));
1853  heta_max[i] = std::min(urelax_i * heta_max[i],
1854  heta_max[i] + std::abs(bar_deltaSqd_heta[i]));
1855 
1856 #if IF_DEBUGGING
1857  if (hLow[i] > h_max[i] || hLow[i] < h_min[i]) {
1858  std::cout << " --- We have a major problem (h bounds) --- "
1859  << std::setprecision(15) << std::endl;
1860  std::cout << "hLow[i] = " << hLow[i] << " \n "
1861  << "h_min[i] = " << h_min[i] << " \n "
1862  << "h_max[i] = " << h_max[i] << " \n "
1863  << "Diff max = " << h_max[i] - hLow[i] << " \n "
1864  << "Diff min = " << hLow[i] - h_min[i] << " \n " << std::endl;
1865  }
1866 
1867  if (heta_max[i] - hetaLow[i] < -hEps * hEps ||
1868  hetaLow[i] - heta_min[i] < -hEps * hEps) {
1869  std::cout << " --- We have a major problem (heta bounds) --- "
1870  << std::setprecision(15) << std::endl;
1871  std::cout << "hetaLow[i] = " << hetaLow[i] << " \n "
1872  << "heta_min[i] = " << heta_min[i] << " \n "
1873  << "heta_max[i] = " << heta_max[i] << " \n "
1874  << "Diff max = " << heta_max[i] - hetaLow[i] << " \n "
1875  << "Diff min = " << hetaLow[i] - heta_min[i] << " \n "
1876  << std::endl;
1877  }
1878 #endif
1879  } // i loop ends here
1880 
1881  // ********** END OF LOOP IN DOFs ********** //
1882  } // end calculateBoundsAndHighOrderRHS
1883 
1884  void calculateResidual(arguments_dict &args) {
1885  xt::pyarray<double> &mesh_trial_ref = args.array<double>("mesh_trial_ref");
1886  xt::pyarray<double> &mesh_grad_trial_ref =
1887  args.array<double>("mesh_grad_trial_ref");
1888  xt::pyarray<double> &mesh_dof = args.array<double>("mesh_dof");
1889  xt::pyarray<int> &mesh_l2g = args.array<int>("mesh_l2g");
1890  xt::pyarray<double> &dV_ref = args.array<double>("dV_ref");
1891  xt::pyarray<double> &h_trial_ref = args.array<double>("h_trial_ref");
1892  xt::pyarray<double> &h_grad_trial_ref =
1893  args.array<double>("h_grad_trial_ref");
1894  xt::pyarray<double> &h_test_ref = args.array<double>("h_test_ref");
1895  xt::pyarray<double> &h_grad_test_ref =
1896  args.array<double>("h_grad_test_ref");
1897  xt::pyarray<double> &vel_trial_ref = args.array<double>("vel_trial_ref");
1898  xt::pyarray<double> &vel_grad_trial_ref =
1899  args.array<double>("vel_grad_trial_ref");
1900  xt::pyarray<double> &vel_test_ref = args.array<double>("vel_test_ref");
1901  xt::pyarray<double> &vel_grad_test_ref =
1902  args.array<double>("vel_grad_test_ref");
1903  xt::pyarray<double> &mesh_trial_trace_ref =
1904  args.array<double>("mesh_trial_trace_ref");
1905  xt::pyarray<double> &mesh_grad_trial_trace_ref =
1906  args.array<double>("mesh_grad_trial_trace_ref");
1907  xt::pyarray<double> &h_trial_trace_ref =
1908  args.array<double>("h_trial_trace_ref");
1909  xt::pyarray<double> &h_grad_trial_trace_ref =
1910  args.array<double>("h_grad_trial_trace_ref");
1911  xt::pyarray<double> &h_test_trace_ref =
1912  args.array<double>("h_test_trace_ref");
1913  xt::pyarray<double> &h_grad_test_trace_ref =
1914  args.array<double>("h_grad_test_trace_ref");
1915  xt::pyarray<double> &vel_trial_trace_ref =
1916  args.array<double>("vel_trial_trace_ref");
1917  xt::pyarray<double> &vel_grad_trial_trace_ref =
1918  args.array<double>("vel_grad_trial_trace_ref");
1919  xt::pyarray<double> &vel_test_trace_ref =
1920  args.array<double>("vel_test_trace_ref");
1921  xt::pyarray<double> &vel_grad_test_trace_ref =
1922  args.array<double>("vel_grad_test_trace_ref");
1923  xt::pyarray<double> &normal_ref = args.array<double>("normal_ref");
1924  xt::pyarray<double> &boundaryJac_ref =
1925  args.array<double>("boundaryJac_ref");
1926  xt::pyarray<double> &elementDiameter =
1927  args.array<double>("elementDiameter");
1928  int nElements_global = args.scalar<int>("nElements_global");
1929  double g = args.scalar<double>("g");
1930  xt::pyarray<int> &h_l2g = args.array<int>("h_l2g");
1931  xt::pyarray<int> &vel_l2g = args.array<int>("vel_l2g");
1932  xt::pyarray<double> &h_dof_old = args.array<double>("h_dof_old");
1933  xt::pyarray<double> &hu_dof_old = args.array<double>("hu_dof_old");
1934  xt::pyarray<double> &hv_dof_old = args.array<double>("hv_dof_old");
1935  xt::pyarray<double> &heta_dof_old = args.array<double>("heta_dof_old");
1936  xt::pyarray<double> &hw_dof_old = args.array<double>("hw_dof_old");
1937  xt::pyarray<double> &hbeta_dof_old = args.array<double>("hbeta_dof_old");
1938  xt::pyarray<double> &b_dof = args.array<double>("b_dof");
1939  xt::pyarray<double> &h_dof = args.array<double>("h_dof");
1940  xt::pyarray<double> &hu_dof = args.array<double>("hu_dof");
1941  xt::pyarray<double> &hv_dof = args.array<double>("hv_dof");
1942  xt::pyarray<double> &heta_dof = args.array<double>("heta_dof");
1943  xt::pyarray<double> &hw_dof = args.array<double>("hw_dof");
1944  xt::pyarray<double> &hbeta_dof = args.array<double>("hbeta_dof");
1945  xt::pyarray<double> &q_cfl = args.array<double>("q_cfl");
1946  xt::pyarray<int> &sdInfo_hu_hu_rowptr =
1947  args.array<int>("sdInfo_hu_hu_rowptr");
1948  xt::pyarray<int> &sdInfo_hu_hu_colind =
1949  args.array<int>("sdInfo_hu_hu_colind");
1950  xt::pyarray<int> &sdInfo_hu_hv_rowptr =
1951  args.array<int>("sdInfo_hu_hv_rowptr");
1952  xt::pyarray<int> &sdInfo_hu_hv_colind =
1953  args.array<int>("sdInfo_hu_hv_colind");
1954  xt::pyarray<int> &sdInfo_hv_hv_rowptr =
1955  args.array<int>("sdInfo_hv_hv_rowptr");
1956  xt::pyarray<int> &sdInfo_hv_hv_colind =
1957  args.array<int>("sdInfo_hv_hv_colind");
1958  xt::pyarray<int> &sdInfo_hv_hu_rowptr =
1959  args.array<int>("sdInfo_hv_hu_rowptr");
1960  xt::pyarray<int> &sdInfo_hv_hu_colind =
1961  args.array<int>("sdInfo_hv_hu_colind");
1962  int offset_h = args.scalar<int>("offset_h");
1963  int offset_hu = args.scalar<int>("offset_hu");
1964  int offset_hv = args.scalar<int>("offset_hv");
1965  int offset_heta = args.scalar<int>("offset_heta");
1966  int offset_hw = args.scalar<int>("offset_hw");
1967  int offset_hbeta = args.scalar<int>("offset_hbeta");
1968  int stride_h = args.scalar<int>("stride_h");
1969  int stride_hu = args.scalar<int>("stride_hu");
1970  int stride_hv = args.scalar<int>("stride_hv");
1971  int stride_heta = args.scalar<int>("stride_heta");
1972  int stride_hw = args.scalar<int>("stride_hw");
1973  int stride_hbeta = args.scalar<int>("stride_hbeta");
1974  xt::pyarray<double> &globalResidual = args.array<double>("globalResidual");
1975  int nExteriorElementBoundaries_global =
1976  args.scalar<int>("nExteriorElementBoundaries_global");
1977  xt::pyarray<int> &exteriorElementBoundariesArray =
1978  args.array<int>("exteriorElementBoundariesArray");
1979  xt::pyarray<int> &elementBoundaryElementsArray =
1980  args.array<int>("elementBoundaryElementsArray");
1981  xt::pyarray<int> &elementBoundaryLocalElementBoundariesArray =
1982  args.array<int>("elementBoundaryLocalElementBoundariesArray");
1983  xt::pyarray<int> &isDOFBoundary_h = args.array<int>("isDOFBoundary_h");
1984  xt::pyarray<int> &isDOFBoundary_hu = args.array<int>("isDOFBoundary_hu");
1985  xt::pyarray<int> &isDOFBoundary_hv = args.array<int>("isDOFBoundary_hv");
1986  xt::pyarray<int> &isAdvectiveFluxBoundary_h =
1987  args.array<int>("isAdvectiveFluxBoundary_h");
1988  xt::pyarray<int> &isAdvectiveFluxBoundary_hu =
1989  args.array<int>("isAdvectiveFluxBoundary_hu");
1990  xt::pyarray<int> &isAdvectiveFluxBoundary_hv =
1991  args.array<int>("isAdvectiveFluxBoundary_hv");
1992  xt::pyarray<int> &isDiffusiveFluxBoundary_hu =
1993  args.array<int>("isDiffusiveFluxBoundary_hu");
1994  xt::pyarray<int> &isDiffusiveFluxBoundary_hv =
1995  args.array<int>("isDiffusiveFluxBoundary_hv");
1996  xt::pyarray<double> &ebqe_bc_h_ext = args.array<double>("ebqe_bc_h_ext");
1997  xt::pyarray<double> &ebqe_bc_flux_mass_ext =
1998  args.array<double>("ebqe_bc_flux_mass_ext");
1999  xt::pyarray<double> &ebqe_bc_flux_mom_hu_adv_ext =
2000  args.array<double>("ebqe_bc_flux_mom_hu_adv_ext");
2001  xt::pyarray<double> &ebqe_bc_flux_mom_hv_adv_ext =
2002  args.array<double>("ebqe_bc_flux_mom_hv_adv_ext");
2003  xt::pyarray<double> &ebqe_bc_hu_ext = args.array<double>("ebqe_bc_hu_ext");
2004  xt::pyarray<double> &ebqe_bc_flux_hu_diff_ext =
2005  args.array<double>("ebqe_bc_flux_hu_diff_ext");
2006  xt::pyarray<double> &ebqe_penalty_ext =
2007  args.array<double>("ebqe_penalty_ext");
2008  xt::pyarray<double> &ebqe_bc_hv_ext = args.array<double>("ebqe_bc_hv_ext");
2009  xt::pyarray<double> &ebqe_bc_flux_hv_diff_ext =
2010  args.array<double>("ebqe_bc_flux_hv_diff_ext");
2011  xt::pyarray<double> &q_velocity = args.array<double>("q_velocity");
2012  xt::pyarray<double> &ebqe_velocity = args.array<double>("ebqe_velocity");
2013  xt::pyarray<double> &flux = args.array<double>("flux");
2014  xt::pyarray<double> &elementResidual_h_save =
2015  args.array<double>("elementResidual_h_save");
2016  const xt::pyarray<double> &Cx = args.array<double>("Cx");
2017  const xt::pyarray<double> &Cy = args.array<double>("Cy");
2018  const xt::pyarray<double> &CTx = args.array<double>("CTx");
2019  const xt::pyarray<double> &CTy = args.array<double>("CTy");
2020  const int numDOFsPerEqn = args.scalar<int>("numDOFsPerEqn");
2021  const xt::pyarray<int> &csrRowIndeces_DofLoops =
2022  args.array<int>("csrRowIndeces_DofLoops");
2023  const xt::pyarray<int> &csrColumnOffsets_DofLoops =
2024  args.array<int>("csrColumnOffsets_DofLoops");
2025  const xt::pyarray<double> &lumped_mass_matrix =
2026  args.array<double>("lumped_mass_matrix");
2027  const double cfl_run = args.scalar<double>("cfl_run");
2028  const double hEps = args.scalar<double>("hEps");
2029  xt::pyarray<double> &hnp1_at_quad_point =
2030  args.array<double>("hnp1_at_quad_point");
2031  xt::pyarray<double> &hunp1_at_quad_point =
2032  args.array<double>("hunp1_at_quad_point");
2033  xt::pyarray<double> &hvnp1_at_quad_point =
2034  args.array<double>("hvnp1_at_quad_point");
2035  xt::pyarray<double> &hetanp1_at_quad_point =
2036  args.array<double>("hetanp1_at_quad_point");
2037  xt::pyarray<double> &hwnp1_at_quad_point =
2038  args.array<double>("hwnp1_at_quad_point");
2039  xt::pyarray<double> &hbetanp1_at_quad_point =
2040  args.array<double>("hbetanp1_at_quad_point");
2041  int LUMPED_MASS_MATRIX = args.scalar<int>("LUMPED_MASS_MATRIX");
2042  double dt = args.scalar<double>("dt");
2043  xt::pyarray<double> &quantDOFs = args.array<double>("quantDOFs");
2044  int SECOND_CALL_CALCULATE_RESIDUAL =
2045  args.scalar<int>("SECOND_CALL_CALCULATE_RESIDUAL");
2046  int COMPUTE_NORMALS = args.scalar<int>("COMPUTE_NORMALS");
2047  xt::pyarray<double> &normalx = args.array<double>("normalx");
2048  xt::pyarray<double> &normaly = args.array<double>("normaly");
2049  const int lstage = args.scalar<int>("lstage");
2050  const xt::pyarray<double> &MassMatrix = args.array<double>("MassMatrix");
2051  const xt::pyarray<double> &RHS_high_h = args.array<double>("RHS_high_h");
2052  const xt::pyarray<double> &RHS_high_hu = args.array<double>("RHS_high_hu");
2053  const xt::pyarray<double> &RHS_high_hv = args.array<double>("RHS_high_hv");
2054  const xt::pyarray<double> &RHS_high_heta =
2055  args.array<double>("RHS_high_heta");
2056  const xt::pyarray<double> &RHS_high_hw = args.array<double>("RHS_high_hw");
2057  const xt::pyarray<double> &RHS_high_hbeta =
2058  args.array<double>("RHS_high_hbeta");
2059 
2061  // ********** CELL LOOPS ********** //
2063  // To compute:
2064  // * Time derivative term
2065  // * Cell-based CFL
2066  // * Velocity and solution at quadrature points (for other models)
2067  for (int eN = 0; eN < nElements_global; eN++) {
2068  // declare local storage for element residual and initialize
2069  register double elementResidual_h[nDOF_test_element],
2070  elementResidual_hu[nDOF_test_element],
2071  elementResidual_hv[nDOF_test_element],
2072  elementResidual_heta[nDOF_test_element],
2073  elementResidual_hw[nDOF_test_element],
2074  elementResidual_hbeta[nDOF_test_element];
2075 
2076  for (int i = 0; i < nDOF_test_element; i++) {
2077  elementResidual_h[i] = 0.0;
2078  elementResidual_hu[i] = 0.0;
2079  elementResidual_hv[i] = 0.0;
2080  elementResidual_heta[i] = 0.0;
2081  elementResidual_hw[i] = 0.0;
2082  elementResidual_hbeta[i] = 0.0;
2083  }
2084  //
2085  // loop over quadrature points and compute integrands
2086  //
2087  for (int k = 0; k < nQuadraturePoints_element; k++) {
2088  // compute indices and declare local storage
2089  register int eN_k = eN * nQuadraturePoints_element + k,
2090  eN_k_nSpace = eN_k * nSpace,
2091  eN_nDOF_trial_element = eN * nDOF_trial_element;
2092  register double h = 0.0, hu = 0.0, hv = 0.0, heta = 0.0, hw = 0.0,
2093  hbeta = 0.0, // solution at current time
2094  h_old = 0.0, hu_old = 0.0, hv_old = 0.0, heta_old = 0.0,
2095  hw_old = 0.0,
2096  hbeta_old = 0.0, // solution at lstage
2097  jac[nSpace * nSpace], jacDet, jacInv[nSpace * nSpace],
2098  h_test_dV[nDOF_trial_element], dV, x, y, xt, yt;
2099  // get jacobian, etc for mapping reference element
2100  ck.calculateMapping_element(
2101  eN, k, mesh_dof.data(), mesh_l2g.data(), mesh_trial_ref.data(),
2102  mesh_grad_trial_ref.data(), jac, jacDet, jacInv, x, y);
2103  // get the physical integration weight
2104  dV = fabs(jacDet) * dV_ref[k];
2105  // get the solution at current time
2106  ck.valFromDOF(h_dof.data(), &h_l2g.data()[eN_nDOF_trial_element],
2107  &h_trial_ref.data()[k * nDOF_trial_element], h);
2108  ck.valFromDOF(hu_dof.data(), &vel_l2g[eN_nDOF_trial_element],
2109  &vel_trial_ref.data()[k * nDOF_trial_element], hu);
2110  ck.valFromDOF(hv_dof.data(), &vel_l2g[eN_nDOF_trial_element],
2111  &vel_trial_ref.data()[k * nDOF_trial_element], hv);
2112  ck.valFromDOF(heta_dof.data(), &vel_l2g.data()[eN_nDOF_trial_element],
2113  &vel_trial_ref.data()[k * nDOF_trial_element], heta);
2114  ck.valFromDOF(hw_dof.data(), &vel_l2g.data()[eN_nDOF_trial_element],
2115  &vel_trial_ref.data()[k * nDOF_trial_element], hw);
2116  ck.valFromDOF(hbeta_dof.data(), &vel_l2g.data()[eN_nDOF_trial_element],
2117  &vel_trial_ref.data()[k * nDOF_trial_element], hbeta);
2118  // get the solution at the lstage
2119  ck.valFromDOF(h_dof_old.data(), &h_l2g.data()[eN_nDOF_trial_element],
2120  &h_trial_ref.data()[k * nDOF_trial_element], h_old);
2121  ck.valFromDOF(hu_dof_old.data(), &vel_l2g.data()[eN_nDOF_trial_element],
2122  &vel_trial_ref.data()[k * nDOF_trial_element], hu_old);
2123  ck.valFromDOF(hv_dof_old.data(), &vel_l2g.data()[eN_nDOF_trial_element],
2124  &vel_trial_ref.data()[k * nDOF_trial_element], hv_old);
2125  ck.valFromDOF(heta_dof_old.data(), &vel_l2g[eN_nDOF_trial_element],
2126  &vel_trial_ref.data()[k * nDOF_trial_element], heta_old);
2127  ck.valFromDOF(hw_dof_old.data(), &vel_l2g.data()[eN_nDOF_trial_element],
2128  &vel_trial_ref.data()[k * nDOF_trial_element], hw_old);
2129  ck.valFromDOF(hbeta_dof_old.data(),
2130  &vel_l2g.data()[eN_nDOF_trial_element],
2131  &vel_trial_ref.data()[k * nDOF_trial_element], hbeta_old);
2132  // calculate cell-based CFL to keep a reference
2133  calculateCFL(elementDiameter.data()[eN], g, h_old, hu_old, hv_old, hEps,
2134  q_cfl[eN_k]);
2135  // precalculate test function products with integration weights
2136  for (int j = 0; j < nDOF_trial_element; j++)
2137  h_test_dV[j] = h_test_ref[k * nDOF_trial_element + j] * dV;
2138 
2139  // SAVE VELOCITY // at quadrature points for other models to use
2140  q_velocity[eN_k_nSpace + 0] =
2141  2 * h / (h * h + std::pow(fmax(h, hEps), 2)) * hu;
2142  q_velocity[eN_k_nSpace + 1] =
2143  2 * h / (h * h + std::pow(fmax(h, hEps), 2)) * hv;
2144  hnp1_at_quad_point[eN_k] = h;
2145  hunp1_at_quad_point[eN_k] = hu;
2146  hvnp1_at_quad_point[eN_k] = hv;
2147  hetanp1_at_quad_point[eN_k] = heta;
2148  hwnp1_at_quad_point[eN_k] = hw;
2149  hbetanp1_at_quad_point[eN_k] = hbeta;
2150 
2151  for (int i = 0; i < nDOF_test_element; i++) {
2152  // compute time derivative part of global residual. NOTE: no lumping
2153  elementResidual_h[i] += (h - h_old) * h_test_dV[i];
2154  elementResidual_hu[i] += (hu - hu_old) * h_test_dV[i];
2155  elementResidual_hv[i] += (hv - hv_old) * h_test_dV[i];
2156  elementResidual_heta[i] += (heta - heta_old) * h_test_dV[i];
2157  elementResidual_hw[i] += (hw - hw_old) * h_test_dV[i];
2158  elementResidual_hbeta[i] += (hbeta - hbeta_old) * h_test_dV[i];
2159  }
2160  }
2161  // distribute
2162  for (int i = 0; i < nDOF_test_element; i++) {
2163  register int eN_i = eN * nDOF_test_element + i;
2164 
2165  // global i-th index for h (the same as for vel_l2g)
2166  int h_gi = h_l2g[eN_i];
2167 
2168  // distribute time derivative to global residual
2169  globalResidual[offset_h + stride_h * h_gi] += elementResidual_h[i];
2170  globalResidual[offset_hu + stride_hu * h_gi] += elementResidual_hu[i];
2171  globalResidual[offset_hv + stride_hv * h_gi] += elementResidual_hv[i];
2172  globalResidual[offset_heta + stride_heta * h_gi] +=
2173  elementResidual_heta[i];
2174  globalResidual[offset_hw + stride_hw * h_gi] += elementResidual_hw[i];
2175  globalResidual[offset_hbeta + stride_hbeta * h_gi] +=
2176  elementResidual_hbeta[i];
2177  }
2178  }
2179  // ********** END OF CELL LOOPS ********** //
2180 
2181  if (SECOND_CALL_CALCULATE_RESIDUAL == 0) // This is to save some time
2182  {
2184  // ********** FIRST AND ONLY LOOP ON DOFs ********** //
2186  // To compute:
2187  // * High-order update with Neumann series approximation
2188 
2189  int ij = 0;
2190  for (int i = 0; i < numDOFsPerEqn; i++) {
2191 
2192  // get things at ith node
2193  const double hi = h_dof_old[i];
2194  const double hui = hu_dof_old[i];
2195  const double hvi = hv_dof_old[i];
2196  const double hetai = heta_dof_old[i];
2197  const double hwi = hw_dof_old[i];
2198  const double hbetai = hbeta_dof_old[i];
2199  const double mi = lumped_mass_matrix[i];
2200 
2201  double sum_RHS_h = 0.;
2202  double sum_RHS_hu = 0.;
2203  double sum_RHS_hv = 0.;
2204  double sum_RHS_heta = 0.;
2205  double sum_RHS_hw = 0.;
2206  double sum_RHS_hbeta = 0.;
2207 
2208  double b_ij = 0., b_ji = 0.;
2209 
2210  // loop over the sparsity pattern of the i-th DOF
2211  for (int offset = csrRowIndeces_DofLoops[i];
2212  offset < csrRowIndeces_DofLoops[i + 1]; offset++) {
2213 
2214  int j = csrColumnOffsets_DofLoops[offset];
2215 
2216  // get things at jth node
2217  const double mj = lumped_mass_matrix[j];
2218 
2219  // define b_ij and b_ji
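// b_ij = delta_ij - M_ij/m_j and b_ji = delta_ij - M_ij/m_i. With these, the
// update below,
//   U_i^{n+1} = U_i^n + dt/m_i * (RHS_i + sum_j (b_ij RHS_j - b_ji RHS_i)),
// reproduces the first-order Neumann approximation
//   M_C^{-1} ~ (I + (I - M_L^{-1} M_C)) M_L^{-1}
// of the consistent-mass solve (M_L lumped, M_C consistent), assuming the
// lumped masses m_i are the row sums of MassMatrix.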
2220  if (j != i) {
2221  b_ij = (0. - MassMatrix[ij] / mj);
2222  b_ji = (0. - MassMatrix[ij] / mi);
2223  } else {
2224  b_ij = (1. - MassMatrix[ij] / mj);
2225  b_ji = (1. - MassMatrix[ij] / mi);
2226  }
2227 
2228  // define sum for high-order RHS
2229  sum_RHS_h += b_ij * RHS_high_h[j] - b_ji * RHS_high_h[i];
2230  sum_RHS_hu += b_ij * RHS_high_hu[j] - b_ji * RHS_high_hu[i];
2231  sum_RHS_hv += b_ij * RHS_high_hv[j] - b_ji * RHS_high_hv[i];
2232  sum_RHS_heta += b_ij * RHS_high_heta[j] - b_ji * RHS_high_heta[i];
2233  sum_RHS_hw += b_ij * RHS_high_hw[j] - b_ji * RHS_high_hw[i];
2234  sum_RHS_hbeta += b_ij * RHS_high_hbeta[j] - b_ji * RHS_high_hbeta[i];
2235 
2236  // update ij
2237  ij += 1;
2238  } // j loop ends here
2239 
2240  /* Define global residual */
2241  // if (LUMPED_MASS_MATRIX == 1) {
2242  globalResidual[offset_h + stride_h * i] =
2243  hi + dt / mi * (RHS_high_h[i] + sum_RHS_h);
2244 
2245  globalResidual[offset_hu + stride_hu * i] =
2246  hui + dt / mi * (RHS_high_hu[i] + sum_RHS_hu);
2247 
2248  globalResidual[offset_hv + stride_hv * i] =
2249  hvi + dt / mi * (RHS_high_hv[i] + sum_RHS_hv);
2250 
2251  globalResidual[offset_heta + stride_heta * i] =
2252  hetai + dt / mi * (RHS_high_heta[i] + sum_RHS_heta);
2253 
2254  globalResidual[offset_hw + stride_hw * i] =
2255  hwi + dt / mi * (RHS_high_hw[i] + sum_RHS_hw);
2256 
2257  globalResidual[offset_hbeta + stride_hbeta * i] =
2258  hbetai + dt / mi * (RHS_high_hbeta[i] + sum_RHS_hbeta);
2259 
2260  // clean up potential negative water height due to machine precision
2261  if (globalResidual[offset_h + stride_h * i] >= -hEps &&
2262  globalResidual[offset_h + stride_h * i] < hEps) {
2263  globalResidual[offset_h + stride_h * i] = 0.;
2264  }
2265  }
2266  // ********** END OF LOOP IN DOFs ********** //
2267  } // end SECOND_CALL_CALCULATE_RESIDUAL
2268 
2269  // ********** COMPUTE NORMALS ********** //
2270  if (COMPUTE_NORMALS == 1) {
2271  // Identify the boundary normals and assemble a vector of normal
2272  // components at the boundary DOFs
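// Each exterior face contributes half of its (constant) normal to the DOFs on
// that face; the DOF with local index ebN_local (the node opposite the face in
// the usual simplex numbering) is skipped via the (i == ebN_local ? 0 : 1)
// factor. The accumulated vectors are normalized at the end of this block.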
2273  for (int ebNE = 0; ebNE < nExteriorElementBoundaries_global; ebNE++) {
2274  register int
2275  ebN = exteriorElementBoundariesArray[ebNE],
2276  eN = elementBoundaryElementsArray[ebN * 2 + 0],
2277  ebN_local = elementBoundaryLocalElementBoundariesArray[ebN * 2 + 0];
2278  register double normal[3];
2279  { // "Loop" in quad points
2280  int kb = 0; // NOTE: only one quadrature point is needed since
2281  // the element is not curved, so the normal is constant
2282  // per element
2283  register int ebN_local_kb =
2284  ebN_local * nQuadraturePoints_elementBoundary + kb;
2285  register double jac_ext[nSpace * nSpace], jacDet_ext,
2286  jacInv_ext[nSpace * nSpace], boundaryJac[nSpace * (nSpace - 1)],
2287  metricTensor[(nSpace - 1) * (nSpace - 1)], metricTensorDetSqrt,
2288  x_ext, y_ext;
2289  /* compute information about mapping from reference element to
2290  * physical element */
2291  ck.calculateMapping_elementBoundary(
2292  eN, ebN_local, kb, ebN_local_kb, mesh_dof.data(), mesh_l2g.data(),
2293  mesh_trial_trace_ref.data(), mesh_grad_trial_trace_ref.data(),
2294  boundaryJac_ref.data(), jac_ext, jacDet_ext, jacInv_ext,
2295  boundaryJac, metricTensor, metricTensorDetSqrt, normal_ref.data(),
2296  normal, x_ext, y_ext);
2297  }
2298  // distribute the normal vectors
2299  for (int i = 0; i < nDOF_test_element; i++) {
2300  int eN_i = eN * nDOF_test_element + i;
2301  int gi = h_l2g[eN_i];
2302  normalx[gi] += 0.5 * normal[0] * (i == ebN_local ? 0. : 1.);
2303  normaly[gi] += 0.5 * normal[1] * (i == ebN_local ? 0. : 1.);
2304  }
2305  }
2306  // normalize
2307  for (int gi = 0; gi < numDOFsPerEqn; gi++) {
2308  double norm_factor =
2309  sqrt(std::pow(normalx[gi], 2) + std::pow(normaly[gi], 2));
2310  if (norm_factor != 0) {
2311  normalx[gi] /= norm_factor;
2312  normaly[gi] /= norm_factor;
2313  }
2314  }
2315  }
2316  // ********** END OF COMPUTING NORMALS ********** //
2317  } // end calculateResidual
2318 
2320  xt::pyarray<double> &mesh_trial_ref = args.array<double>("mesh_trial_ref");
2321  xt::pyarray<double> &mesh_grad_trial_ref =
2322  args.array<double>("mesh_grad_trial_ref");
2323  xt::pyarray<double> &mesh_dof = args.array<double>("mesh_dof");
2324  xt::pyarray<double> &mesh_velocity_dof =
2325  args.array<double>("mesh_velocity_dof");
2326  double MOVING_DOMAIN = args.scalar<double>("MOVING_DOMAIN");
2327  xt::pyarray<int> &mesh_l2g = args.array<int>("mesh_l2g");
2328  xt::pyarray<double> &dV_ref = args.array<double>("dV_ref");
2329  xt::pyarray<double> &h_trial_ref = args.array<double>("h_trial_ref");
2330  xt::pyarray<double> &h_grad_trial_ref =
2331  args.array<double>("h_grad_trial_ref");
2332  xt::pyarray<double> &h_test_ref = args.array<double>("h_test_ref");
2333  xt::pyarray<double> &h_grad_test_ref =
2334  args.array<double>("h_grad_test_ref");
2335  xt::pyarray<double> &vel_trial_ref = args.array<double>("vel_trial_ref");
2336  xt::pyarray<double> &vel_grad_trial_ref =
2337  args.array<double>("vel_grad_trial_ref");
2338  xt::pyarray<double> &vel_test_ref = args.array<double>("vel_test_ref");
2339  xt::pyarray<double> &vel_grad_test_ref =
2340  args.array<double>("vel_grad_test_ref");
2341  xt::pyarray<double> &mesh_trial_trace_ref =
2342  args.array<double>("mesh_trial_trace_ref");
2343  xt::pyarray<double> &mesh_grad_trial_trace_ref =
2344  args.array<double>("mesh_grad_trial_trace_ref");
2345  xt::pyarray<double> &dS_ref = args.array<double>("dS_ref");
2346  xt::pyarray<double> &h_trial_trace_ref =
2347  args.array<double>("h_trial_trace_ref");
2348  xt::pyarray<double> &h_grad_trial_trace_ref =
2349  args.array<double>("h_grad_trial_trace_ref");
2350  xt::pyarray<double> &h_test_trace_ref =
2351  args.array<double>("h_test_trace_ref");
2352  xt::pyarray<double> &h_grad_test_trace_ref =
2353  args.array<double>("h_grad_test_trace_ref");
2354  xt::pyarray<double> &vel_trial_trace_ref =
2355  args.array<double>("vel_trial_trace_ref");
2356  xt::pyarray<double> &vel_grad_trial_trace_ref =
2357  args.array<double>("vel_grad_trial_trace_ref");
2358  xt::pyarray<double> &vel_test_trace_ref =
2359  args.array<double>("vel_test_trace_ref");
2360  xt::pyarray<double> &vel_grad_test_trace_ref =
2361  args.array<double>("vel_grad_test_trace_ref");
2362  xt::pyarray<double> &normal_ref = args.array<double>("normal_ref");
2363  xt::pyarray<double> &boundaryJac_ref =
2364  args.array<double>("boundaryJac_ref");
2365  xt::pyarray<double> &elementDiameter =
2366  args.array<double>("elementDiameter");
2367  int nElements_global = args.scalar<int>("nElements_global");
2368  double g = args.scalar<double>("g");
2369  xt::pyarray<int> &h_l2g = args.array<int>("h_l2g");
2370  xt::pyarray<int> &vel_l2g = args.array<int>("vel_l2g");
2371  xt::pyarray<double> &b_dof = args.array<double>("b_dof");
2372  xt::pyarray<double> &h_dof = args.array<double>("h_dof");
2373  xt::pyarray<double> &hu_dof = args.array<double>("hu_dof");
2374  xt::pyarray<double> &hv_dof = args.array<double>("hv_dof");
2375  xt::pyarray<double> &heta_dof = args.array<double>("heta_dof");
2376  xt::pyarray<double> &hw_dof = args.array<double>("hw_dof");
2377  xt::pyarray<double> &hbeta_dof = args.array<double>("hbeta_dof");
2378  xt::pyarray<double> &q_cfl = args.array<double>("q_cfl");
2379  xt::pyarray<int> &sdInfo_hu_hu_rowptr =
2380  args.array<int>("sdInfo_hu_hu_rowptr");
2381  xt::pyarray<int> &sdInfo_hu_hu_colind =
2382  args.array<int>("sdInfo_hu_hu_colind");
2383  xt::pyarray<int> &sdInfo_hu_hv_rowptr =
2384  args.array<int>("sdInfo_hu_hv_rowptr");
2385  xt::pyarray<int> &sdInfo_hu_hv_colind =
2386  args.array<int>("sdInfo_hu_hv_colind");
2387  xt::pyarray<int> &sdInfo_hv_hv_rowptr =
2388  args.array<int>("sdInfo_hv_hv_rowptr");
2389  xt::pyarray<int> &sdInfo_hv_hv_colind =
2390  args.array<int>("sdInfo_hv_hv_colind");
2391  xt::pyarray<int> &sdInfo_hv_hu_rowptr =
2392  args.array<int>("sdInfo_hv_hu_rowptr");
2393  xt::pyarray<int> &sdInfo_hv_hu_colind =
2394  args.array<int>("sdInfo_hv_hu_colind");
2395  xt::pyarray<int> &csrRowIndeces_h_h = args.array<int>("csrRowIndeces_h_h");
2396  xt::pyarray<int> &csrColumnOffsets_h_h =
2397  args.array<int>("csrColumnOffsets_h_h");
2398  xt::pyarray<int> &csrRowIndeces_h_hu =
2399  args.array<int>("csrRowIndeces_h_hu");
2400  xt::pyarray<int> &csrColumnOffsets_h_hu =
2401  args.array<int>("csrColumnOffsets_h_hu");
2402  xt::pyarray<int> &csrRowIndeces_h_hv =
2403  args.array<int>("csrRowIndeces_h_hv");
2404  xt::pyarray<int> &csrColumnOffsets_h_hv =
2405  args.array<int>("csrColumnOffsets_h_hv");
2406  xt::pyarray<int> &csrRowIndeces_h_heta =
2407  args.array<int>("csrRowIndeces_h_heta");
2408  xt::pyarray<int> &csrColumnOffsets_h_heta =
2409  args.array<int>("csrColumnOffsets_h_heta");
2410  xt::pyarray<int> &csrRowIndeces_h_hw =
2411  args.array<int>("csrRowIndeces_h_hw");
2412  xt::pyarray<int> &csrColumnOffsets_h_hw =
2413  args.array<int>("csrColumnOffsets_h_hw");
2414  xt::pyarray<int> &csrRowIndeces_h_hbeta =
2415  args.array<int>("csrRowIndeces_h_hbeta");
2416  xt::pyarray<int> &csrColumnOffsets_h_hbeta =
2417  args.array<int>("csrColumnOffsets_h_hbeta");
2418  xt::pyarray<int> &csrRowIndeces_hu_h =
2419  args.array<int>("csrRowIndeces_hu_h");
2420  xt::pyarray<int> &csrColumnOffsets_hu_h =
2421  args.array<int>("csrColumnOffsets_hu_h");
2422  xt::pyarray<int> &csrRowIndeces_hu_hu =
2423  args.array<int>("csrRowIndeces_hu_hu");
2424  xt::pyarray<int> &csrColumnOffsets_hu_hu =
2425  args.array<int>("csrColumnOffsets_hu_hu");
2426  xt::pyarray<int> &csrRowIndeces_hu_hv =
2427  args.array<int>("csrRowIndeces_hu_hv");
2428  xt::pyarray<int> &csrColumnOffsets_hu_hv =
2429  args.array<int>("csrColumnOffsets_hu_hv");
2430  xt::pyarray<int> &csrRowIndeces_hu_heta =
2431  args.array<int>("csrRowIndeces_hu_heta");
2432  xt::pyarray<int> &csrColumnOffsets_hu_heta =
2433  args.array<int>("csrColumnOffsets_hu_heta");
2434  xt::pyarray<int> &csrRowIndeces_hu_hw =
2435  args.array<int>("csrRowIndeces_hu_hw");
2436  xt::pyarray<int> &csrColumnOffsets_hu_hw =
2437  args.array<int>("csrColumnOffsets_hu_hw");
2438  xt::pyarray<int> &csrRowIndeces_hu_hbeta =
2439  args.array<int>("csrRowIndeces_hu_hbeta");
2440  xt::pyarray<int> &csrColumnOffsets_hu_hbeta =
2441  args.array<int>("csrColumnOffsets_hu_hbeta");
2442  xt::pyarray<int> &csrRowIndeces_hv_h =
2443  args.array<int>("csrRowIndeces_hv_h");
2444  xt::pyarray<int> &csrColumnOffsets_hv_h =
2445  args.array<int>("csrColumnOffsets_hv_h");
2446  xt::pyarray<int> &csrRowIndeces_hv_hu =
2447  args.array<int>("csrRowIndeces_hv_hu");
2448  xt::pyarray<int> &csrColumnOffsets_hv_hu =
2449  args.array<int>("csrColumnOffsets_hv_hu");
2450  xt::pyarray<int> &csrRowIndeces_hv_hv =
2451  args.array<int>("csrRowIndeces_hv_hv");
2452  xt::pyarray<int> &csrColumnOffsets_hv_hv =
2453  args.array<int>("csrColumnOffsets_hv_hv");
2454  xt::pyarray<int> &csrRowIndeces_hv_heta =
2455  args.array<int>("csrRowIndeces_hv_heta");
2456  xt::pyarray<int> &csrColumnOffsets_hv_heta =
2457  args.array<int>("csrColumnOffsets_hv_heta");
2458  xt::pyarray<int> &csrRowIndeces_hv_hw =
2459  args.array<int>("csrRowIndeces_hv_hw");
2460  xt::pyarray<int> &csrColumnOffsets_hv_hw =
2461  args.array<int>("csrColumnOffsets_hv_hw");
2462  xt::pyarray<int> &csrRowIndeces_hv_hbeta =
2463  args.array<int>("csrRowIndeces_hv_hbeta");
2464  xt::pyarray<int> &csrColumnOffsets_hv_hbeta =
2465  args.array<int>("csrColumnOffsets_hv_hbeta");
2466  xt::pyarray<int> &csrRowIndeces_heta_h =
2467  args.array<int>("csrRowIndeces_heta_h");
2468  xt::pyarray<int> &csrColumnOffsets_heta_h =
2469  args.array<int>("csrColumnOffsets_heta_h");
2470  xt::pyarray<int> &csrRowIndeces_heta_hu =
2471  args.array<int>("csrRowIndeces_heta_hu");
2472  xt::pyarray<int> &csrColumnOffsets_heta_hu =
2473  args.array<int>("csrColumnOffsets_heta_hu");
2474  xt::pyarray<int> &csrRowIndeces_heta_hv =
2475  args.array<int>("csrRowIndeces_heta_hv");
2476  xt::pyarray<int> &csrColumnOffsets_heta_hv =
2477  args.array<int>("csrColumnOffsets_heta_hv");
2478  xt::pyarray<int> &csrRowIndeces_heta_heta =
2479  args.array<int>("csrRowIndeces_heta_heta");
2480  xt::pyarray<int> &csrColumnOffsets_heta_heta =
2481  args.array<int>("csrColumnOffsets_heta_heta");
2482  xt::pyarray<int> &csrRowIndeces_heta_hw =
2483  args.array<int>("csrRowIndeces_heta_hw");
2484  xt::pyarray<int> &csrColumnOffsets_heta_hw =
2485  args.array<int>("csrColumnOffsets_heta_hw");
2486  xt::pyarray<int> &csrRowIndeces_heta_hbeta =
2487  args.array<int>("csrRowIndeces_heta_hbeta");
2488  xt::pyarray<int> &csrColumnOffsets_heta_hbeta =
2489  args.array<int>("csrColumnOffsets_heta_hbeta");
2490  xt::pyarray<int> &csrRowIndeces_hw_h =
2491  args.array<int>("csrRowIndeces_hw_h");
2492  xt::pyarray<int> &csrColumnOffsets_hw_h =
2493  args.array<int>("csrColumnOffsets_hw_h");
2494  xt::pyarray<int> &csrRowIndeces_hw_hu =
2495  args.array<int>("csrRowIndeces_hw_hu");
2496  xt::pyarray<int> &csrColumnOffsets_hw_hu =
2497  args.array<int>("csrColumnOffsets_hw_hu");
2498  xt::pyarray<int> &csrRowIndeces_hw_hv =
2499  args.array<int>("csrRowIndeces_hw_hv");
2500  xt::pyarray<int> &csrColumnOffsets_hw_hv =
2501  args.array<int>("csrColumnOffsets_hw_hv");
2502  xt::pyarray<int> &csrRowIndeces_hw_heta =
2503  args.array<int>("csrRowIndeces_hw_heta");
2504  xt::pyarray<int> &csrColumnOffsets_hw_heta =
2505  args.array<int>("csrColumnOffsets_hw_heta");
2506  xt::pyarray<int> &csrRowIndeces_hw_hw =
2507  args.array<int>("csrRowIndeces_hw_hw");
2508  xt::pyarray<int> &csrColumnOffsets_hw_hw =
2509  args.array<int>("csrColumnOffsets_hw_hw");
2510  xt::pyarray<int> &csrRowIndeces_hw_hbeta =
2511  args.array<int>("csrRowIndeces_hw_hbeta");
2512  xt::pyarray<int> &csrColumnOffsets_hw_hbeta =
2513  args.array<int>("csrColumnOffsets_hw_hbeta");
2514  //
2515  xt::pyarray<int> &csrRowIndeces_hbeta_h =
2516  args.array<int>("csrRowIndeces_hbeta_h");
2517  xt::pyarray<int> &csrColumnOffsets_hbeta_h =
2518  args.array<int>("csrColumnOffsets_hbeta_h");
2519  xt::pyarray<int> &csrRowIndeces_hbeta_hu =
2520  args.array<int>("csrRowIndeces_hbeta_hu");
2521  xt::pyarray<int> &csrColumnOffsets_hbeta_hu =
2522  args.array<int>("csrColumnOffsets_hbeta_hu");
2523  xt::pyarray<int> &csrRowIndeces_hbeta_hv =
2524  args.array<int>("csrRowIndeces_hbeta_hv");
2525  xt::pyarray<int> &csrColumnOffsets_hbeta_hv =
2526  args.array<int>("csrColumnOffsets_hbeta_hv");
2527  xt::pyarray<int> &csrRowIndeces_hbeta_heta =
2528  args.array<int>("csrRowIndeces_hbeta_heta");
2529  xt::pyarray<int> &csrColumnOffsets_hbeta_heta =
2530  args.array<int>("csrColumnOffsets_hbeta_heta");
2531  xt::pyarray<int> &csrRowIndeces_hbeta_hw =
2532  args.array<int>("csrRowIndeces_hbeta_hw");
2533  xt::pyarray<int> &csrColumnOffsets_hbeta_hw =
2534  args.array<int>("csrColumnOffsets_hbeta_hw");
2535  xt::pyarray<int> &csrRowIndeces_hbeta_hbeta =
2536  args.array<int>("csrRowIndeces_hbeta_hbeta");
2537  xt::pyarray<int> &csrColumnOffsets_hbeta_hbeta =
2538  args.array<int>("csrColumnOffsets_hbeta_hbeta");
2539  xt::pyarray<double> &globalJacobian = args.array<double>("globalJacobian");
2540  int nExteriorElementBoundaries_global =
2541  args.scalar<int>("nExteriorElementBoundaries_global");
2542  xt::pyarray<int> &exteriorElementBoundariesArray =
2543  args.array<int>("exteriorElementBoundariesArray");
2544  xt::pyarray<int> &elementBoundaryElementsArray =
2545  args.array<int>("elementBoundaryElementsArray");
2546  xt::pyarray<int> &elementBoundaryLocalElementBoundariesArray =
2547  args.array<int>("elementBoundaryLocalElementBoundariesArray");
2548  xt::pyarray<int> &isDOFBoundary_h = args.array<int>("isDOFBoundary_h");
2549  xt::pyarray<int> &isDOFBoundary_hu = args.array<int>("isDOFBoundary_hu");
2550  xt::pyarray<int> &isDOFBoundary_hv = args.array<int>("isDOFBoundary_hv");
2551  xt::pyarray<int> &isAdvectiveFluxBoundary_h =
2552  args.array<int>("isAdvectiveFluxBoundary_h");
2553  xt::pyarray<int> &isAdvectiveFluxBoundary_hu =
2554  args.array<int>("isAdvectiveFluxBoundary_hu");
2555  xt::pyarray<int> &isAdvectiveFluxBoundary_hv =
2556  args.array<int>("isAdvectiveFluxBoundary_hv");
2557  xt::pyarray<int> &isDiffusiveFluxBoundary_hu =
2558  args.array<int>("isDiffusiveFluxBoundary_hu");
2559  xt::pyarray<int> &isDiffusiveFluxBoundary_hv =
2560  args.array<int>("isDiffusiveFluxBoundary_hv");
2561  xt::pyarray<double> &ebqe_bc_h_ext = args.array<double>("ebqe_bc_h_ext");
2562  xt::pyarray<double> &ebqe_bc_flux_mass_ext =
2563  args.array<double>("ebqe_bc_flux_mass_ext");
2564  xt::pyarray<double> &ebqe_bc_flux_mom_hu_adv_ext =
2565  args.array<double>("ebqe_bc_flux_mom_hu_adv_ext");
2566  xt::pyarray<double> &ebqe_bc_flux_mom_hv_adv_ext =
2567  args.array<double>("ebqe_bc_flux_mom_hv_adv_ext");
2568  xt::pyarray<double> &ebqe_bc_hu_ext = args.array<double>("ebqe_bc_hu_ext");
2569  xt::pyarray<double> &ebqe_bc_flux_hu_diff_ext =
2570  args.array<double>("ebqe_bc_flux_hu_diff_ext");
2571  xt::pyarray<double> &ebqe_penalty_ext =
2572  args.array<double>("ebqe_penalty_ext");
2573  xt::pyarray<double> &ebqe_bc_hv_ext = args.array<double>("ebqe_bc_hv_ext");
2574  xt::pyarray<double> &ebqe_bc_flux_hv_diff_ext =
2575  args.array<double>("ebqe_bc_flux_hv_diff_ext");
2576  xt::pyarray<int> &csrColumnOffsets_eb_h_h =
2577  args.array<int>("csrColumnOffsets_eb_h_h");
2578  xt::pyarray<int> &csrColumnOffsets_eb_h_hu =
2579  args.array<int>("csrColumnOffsets_eb_h_hu");
2580  xt::pyarray<int> &csrColumnOffsets_eb_h_hv =
2581  args.array<int>("csrColumnOffsets_eb_h_hv");
2582  xt::pyarray<int> &csrColumnOffsets_eb_hu_h =
2583  args.array<int>("csrColumnOffsets_eb_hu_h");
2584  xt::pyarray<int> &csrColumnOffsets_eb_hu_hu =
2585  args.array<int>("csrColumnOffsets_eb_hu_hu");
2586  xt::pyarray<int> &csrColumnOffsets_eb_hu_hv =
2587  args.array<int>("csrColumnOffsets_eb_hu_hv");
2588  xt::pyarray<int> &csrColumnOffsets_eb_hv_h =
2589  args.array<int>("csrColumnOffsets_eb_hv_h");
2590  xt::pyarray<int> &csrColumnOffsets_eb_hv_hu =
2591  args.array<int>("csrColumnOffsets_eb_hv_hu");
2592  xt::pyarray<int> &csrColumnOffsets_eb_hv_hv =
2593  args.array<int>("csrColumnOffsets_eb_hv_hv");
2594  double dt = args.scalar<double>("dt");
2595  //
2596  // loop over elements to compute volume integrals and load them into the
2597  // element Jacobians and global Jacobian
2598  //
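// Only the diagonal blocks (h,h), (hu,hu), (hv,hv), (heta,heta), (hw,hw),
// (hbeta,hbeta) are assembled, and each entry is the plain mass integral
// trial_j * test_i * dV; this matches the time-derivative term, which is the
// only part of the residual assembled element-wise in calculateResidual.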
2599  for (int eN = 0; eN < nElements_global; eN++) {
2600  register double elementJacobian_h_h[nDOF_test_element]
2601  [nDOF_trial_element],
2602  elementJacobian_hu_hu[nDOF_test_element][nDOF_trial_element],
2603  elementJacobian_hv_hv[nDOF_test_element][nDOF_trial_element],
2604  elementJacobian_heta_heta[nDOF_test_element][nDOF_trial_element],
2605  elementJacobian_hw_hw[nDOF_test_element][nDOF_trial_element],
2606  elementJacobian_hbeta_hbeta[nDOF_test_element][nDOF_trial_element];
2607  for (int i = 0; i < nDOF_test_element; i++)
2608  for (int j = 0; j < nDOF_trial_element; j++) {
2609  elementJacobian_h_h[i][j] = 0.0;
2610  elementJacobian_hu_hu[i][j] = 0.0;
2611  elementJacobian_hv_hv[i][j] = 0.0;
2612  elementJacobian_heta_heta[i][j] = 0.0;
2613  elementJacobian_hw_hw[i][j] = 0.0;
2614  elementJacobian_hbeta_hbeta[i][j] = 0.0;
2615  }
2616  for (int k = 0; k < nQuadraturePoints_element; k++) {
2617  int eN_k = eN * nQuadraturePoints_element +
2618  k, // index to a scalar at a quadrature point
2619  eN_k_nSpace = eN_k * nSpace,
2620  eN_nDOF_trial_element =
2621  eN * nDOF_trial_element; // index to a vector at a
2622  // quadrature point
2623 
2624  // declare local storage
2625  register double jac[nSpace * nSpace], jacDet, jacInv[nSpace * nSpace],
2626  dV, h_test_dV[nDOF_test_element], vel_test_dV[nDOF_test_element], x,
2627  y, xt, yt;
2628  // get jacobian, etc for mapping reference element
2629  ck.calculateMapping_element(
2630  eN, k, mesh_dof.data(), mesh_l2g.data(), mesh_trial_ref.data(),
2631  mesh_grad_trial_ref.data(), jac, jacDet, jacInv, x, y);
2632  // get the physical integration weight
2633  dV = fabs(jacDet) * dV_ref[k];
2634  // precalculate test function products with integration weights
2635  for (int j = 0; j < nDOF_trial_element; j++) {
2636  h_test_dV[j] = h_test_ref[k * nDOF_trial_element + j] * dV;
2637  vel_test_dV[j] = vel_test_ref[k * nDOF_trial_element + j] * dV;
2638  }
2639  for (int i = 0; i < nDOF_test_element; i++) {
2640  register int i_nSpace = i * nSpace;
2641  for (int j = 0; j < nDOF_trial_element; j++) {
2642  register int j_nSpace = j * nSpace;
2643  elementJacobian_h_h[i][j] +=
2644  h_trial_ref[k * nDOF_trial_element + j] * h_test_dV[i];
2645  elementJacobian_hu_hu[i][j] +=
2646  vel_trial_ref[k * nDOF_trial_element + j] * vel_test_dV[i];
2647  elementJacobian_hv_hv[i][j] +=
2648  vel_trial_ref[k * nDOF_trial_element + j] * vel_test_dV[i];
2649  elementJacobian_heta_heta[i][j] +=
2650  vel_trial_ref[k * nDOF_trial_element + j] * vel_test_dV[i];
2651  elementJacobian_hw_hw[i][j] +=
2652  vel_trial_ref[k * nDOF_trial_element + j] * vel_test_dV[i];
2653  elementJacobian_hbeta_hbeta[i][j] +=
2654  vel_trial_ref[k * nDOF_trial_element + j] * vel_test_dV[i];
2655  } // j
2656  } // i
2657  } // k
2658  //
2659  // load the element Jacobian into the global Jacobian
2660  //
2661  for (int i = 0; i < nDOF_test_element; i++) {
2662  register int eN_i = eN * nDOF_test_element + i;
2663  for (int j = 0; j < nDOF_trial_element; j++) {
2664  register int eN_i_j = eN_i * nDOF_trial_element + j;
2665  globalJacobian[csrRowIndeces_h_h[eN_i] +
2666  csrColumnOffsets_h_h[eN_i_j]] +=
2667  elementJacobian_h_h[i][j];
2668  globalJacobian[csrRowIndeces_hu_hu[eN_i] +
2669  csrColumnOffsets_hu_hu[eN_i_j]] +=
2670  elementJacobian_hu_hu[i][j];
2671  globalJacobian[csrRowIndeces_hv_hv[eN_i] +
2672  csrColumnOffsets_hv_hv[eN_i_j]] +=
2673  elementJacobian_hv_hv[i][j];
2674  globalJacobian[csrRowIndeces_heta_heta[eN_i] +
2675  csrColumnOffsets_heta_heta[eN_i_j]] +=
2676  elementJacobian_heta_heta[i][j];
2677  globalJacobian[csrRowIndeces_hw_hw[eN_i] +
2678  csrColumnOffsets_hw_hw[eN_i_j]] +=
2679  elementJacobian_hw_hw[i][j];
2680  globalJacobian[csrRowIndeces_hbeta_hbeta[eN_i] +
2681  csrColumnOffsets_hbeta_hbeta[eN_i_j]] +=
2682  elementJacobian_hbeta_hbeta[i][j];
2683  } // j
2684  } // i
2685  } // elements
2686  }
2687 
2689  xt::pyarray<double> &mesh_trial_ref = args.array<double>("mesh_trial_ref");
2690  xt::pyarray<double> &mesh_grad_trial_ref =
2691  args.array<double>("mesh_grad_trial_ref");
2692  xt::pyarray<double> &mesh_dof = args.array<double>("mesh_dof");
2693  xt::pyarray<double> &mesh_velocity_dof =
2694  args.array<double>("mesh_velocity_dof");
2695  double MOVING_DOMAIN = args.scalar<double>("MOVING_DOMAIN");
2696  xt::pyarray<int> &mesh_l2g = args.array<int>("mesh_l2g");
2697  xt::pyarray<double> &dV_ref = args.array<double>("dV_ref");
2698  xt::pyarray<double> &h_trial_ref = args.array<double>("h_trial_ref");
2699  xt::pyarray<double> &h_grad_trial_ref =
2700  args.array<double>("h_grad_trial_ref");
2701  xt::pyarray<double> &h_test_ref = args.array<double>("h_test_ref");
2702  xt::pyarray<double> &h_grad_test_ref =
2703  args.array<double>("h_grad_test_ref");
2704  xt::pyarray<double> &vel_trial_ref = args.array<double>("vel_trial_ref");
2705  xt::pyarray<double> &vel_grad_trial_ref =
2706  args.array<double>("vel_grad_trial_ref");
2707  xt::pyarray<double> &vel_test_ref = args.array<double>("vel_test_ref");
2708  xt::pyarray<double> &vel_grad_test_ref =
2709  args.array<double>("vel_grad_test_ref");
2710  xt::pyarray<double> &mesh_trial_trace_ref =
2711  args.array<double>("mesh_trial_trace_ref");
2712  xt::pyarray<double> &mesh_grad_trial_trace_ref =
2713  args.array<double>("mesh_grad_trial_trace_ref");
2714  xt::pyarray<double> &dS_ref = args.array<double>("dS_ref");
2715  xt::pyarray<double> &h_trial_trace_ref =
2716  args.array<double>("h_trial_trace_ref");
2717  xt::pyarray<double> &h_grad_trial_trace_ref =
2718  args.array<double>("h_grad_trial_trace_ref");
2719  xt::pyarray<double> &h_test_trace_ref =
2720  args.array<double>("h_test_trace_ref");
2721  xt::pyarray<double> &h_grad_test_trace_ref =
2722  args.array<double>("h_grad_test_trace_ref");
2723  xt::pyarray<double> &vel_trial_trace_ref =
2724  args.array<double>("vel_trial_trace_ref");
2725  xt::pyarray<double> &vel_grad_trial_trace_ref =
2726  args.array<double>("vel_grad_trial_trace_ref");
2727  xt::pyarray<double> &vel_test_trace_ref =
2728  args.array<double>("vel_test_trace_ref");
2729  xt::pyarray<double> &vel_grad_test_trace_ref =
2730  args.array<double>("vel_grad_test_trace_ref");
2731  xt::pyarray<double> &normal_ref = args.array<double>("normal_ref");
2732  xt::pyarray<double> &boundaryJac_ref =
2733  args.array<double>("boundaryJac_ref");
2734  xt::pyarray<double> &elementDiameter =
2735  args.array<double>("elementDiameter");
2736  int nElements_global = args.scalar<int>("nElements_global");
2737  double g = args.scalar<double>("g");
2738  xt::pyarray<int> &h_l2g = args.array<int>("h_l2g");
2739  xt::pyarray<int> &vel_l2g = args.array<int>("vel_l2g");
2740  xt::pyarray<double> &b_dof = args.array<double>("b_dof");
2741  xt::pyarray<double> &h_dof = args.array<double>("h_dof");
2742  xt::pyarray<double> &hu_dof = args.array<double>("hu_dof");
2743  xt::pyarray<double> &hv_dof = args.array<double>("hv_dof");
2744  xt::pyarray<double> &q_cfl = args.array<double>("q_cfl");
2745  xt::pyarray<int> &sdInfo_hu_hu_rowptr =
2746  args.array<int>("sdInfo_hu_hu_rowptr");
2747  xt::pyarray<int> &sdInfo_hu_hu_colind =
2748  args.array<int>("sdInfo_hu_hu_colind");
2749  xt::pyarray<int> &sdInfo_hu_hv_rowptr =
2750  args.array<int>("sdInfo_hu_hv_rowptr");
2751  xt::pyarray<int> &sdInfo_hu_hv_colind =
2752  args.array<int>("sdInfo_hu_hv_colind");
2753  xt::pyarray<int> &sdInfo_hv_hv_rowptr =
2754  args.array<int>("sdInfo_hv_hv_rowptr");
2755  xt::pyarray<int> &sdInfo_hv_hv_colind =
2756  args.array<int>("sdInfo_hv_hv_colind");
2757  xt::pyarray<int> &sdInfo_hv_hu_rowptr =
2758  args.array<int>("sdInfo_hv_hu_rowptr");
2759  xt::pyarray<int> &sdInfo_hv_hu_colind =
2760  args.array<int>("sdInfo_hv_hu_colind");
2761  // h
2762  xt::pyarray<int> &csrRowIndeces_h_h = args.array<int>("csrRowIndeces_h_h");
2763  xt::pyarray<int> &csrColumnOffsets_h_h =
2764  args.array<int>("csrColumnOffsets_h_h");
2765  xt::pyarray<int> &csrRowIndeces_h_hu =
2766  args.array<int>("csrRowIndeces_h_hu");
2767  xt::pyarray<int> &csrColumnOffsets_h_hu =
2768  args.array<int>("csrColumnOffsets_h_hu");
2769  xt::pyarray<int> &csrRowIndeces_h_hv =
2770  args.array<int>("csrRowIndeces_h_hv");
2771  xt::pyarray<int> &csrColumnOffsets_h_hv =
2772  args.array<int>("csrColumnOffsets_h_hv");
2773  xt::pyarray<int> &csrRowIndeces_h_heta =
2774  args.array<int>("csrRowIndeces_h_heta");
2775  xt::pyarray<int> &csrColumnOffsets_h_heta =
2776  args.array<int>("csrColumnOffsets_h_heta");
2777  xt::pyarray<int> &csrRowIndeces_h_hw =
2778  args.array<int>("csrRowIndeces_h_hw");
2779  xt::pyarray<int> &csrColumnOffsets_h_hw =
2780  args.array<int>("csrColumnOffsets_h_hw");
2781  // hu
2782  xt::pyarray<int> &csrRowIndeces_hu_h =
2783  args.array<int>("csrRowIndeces_hu_h");
2784  xt::pyarray<int> &csrColumnOffsets_hu_h =
2785  args.array<int>("csrColumnOffsets_hu_h");
2786  xt::pyarray<int> &csrRowIndeces_hu_hu =
2787  args.array<int>("csrRowIndeces_hu_hu");
2788  xt::pyarray<int> &csrColumnOffsets_hu_hu =
2789  args.array<int>("csrColumnOffsets_hu_hu");
2790  xt::pyarray<int> &csrRowIndeces_hu_hv =
2791  args.array<int>("csrRowIndeces_hu_hv");
2792  xt::pyarray<int> &csrColumnOffsets_hu_hv =
2793  args.array<int>("csrColumnOffsets_hu_hv");
2794  xt::pyarray<int> &csrRowIndeces_hu_heta =
2795  args.array<int>("csrRowIndeces_hu_heta");
2796  xt::pyarray<int> &csrColumnOffsets_hu_heta =
2797  args.array<int>("csrColumnOffsets_hu_heta");
2798  xt::pyarray<int> &csrRowIndeces_hu_hw =
2799  args.array<int>("csrRowIndeces_hu_hw");
2800  xt::pyarray<int> &csrColumnOffsets_hu_hw =
2801  args.array<int>("csrColumnOffsets_hu_hw");
2802  // hv
2803  xt::pyarray<int> &csrRowIndeces_hv_h =
2804  args.array<int>("csrRowIndeces_hv_h");
2805  xt::pyarray<int> &csrColumnOffsets_hv_h =
2806  args.array<int>("csrColumnOffsets_hv_h");
2807  xt::pyarray<int> &csrRowIndeces_hv_hu =
2808  args.array<int>("csrRowIndeces_hv_hu");
2809  xt::pyarray<int> &csrColumnOffsets_hv_hu =
2810  args.array<int>("csrColumnOffsets_hv_hu");
2811  xt::pyarray<int> &csrRowIndeces_hv_hv =
2812  args.array<int>("csrRowIndeces_hv_hv");
2813  xt::pyarray<int> &csrColumnOffsets_hv_hv =
2814  args.array<int>("csrColumnOffsets_hv_hv");
2815  xt::pyarray<int> &csrRowIndeces_hv_heta =
2816  args.array<int>("csrRowIndeces_hv_heta");
2817  xt::pyarray<int> &csrColumnOffsets_hv_heta =
2818  args.array<int>("csrColumnOffsets_hv_heta");
2819  xt::pyarray<int> &csrRowIndeces_hv_hw =
2820  args.array<int>("csrRowIndeces_hv_hw");
2821  xt::pyarray<int> &csrColumnOffsets_hv_hw =
2822  args.array<int>("csrColumnOffsets_hv_hw");
2823  // heta
2824  xt::pyarray<int> &csrRowIndeces_heta_h =
2825  args.array<int>("csrRowIndeces_heta_h");
2826  xt::pyarray<int> &csrColumnOffsets_heta_h =
2827  args.array<int>("csrColumnOffsets_heta_h");
2828  xt::pyarray<int> &csrRowIndeces_heta_hu =
2829  args.array<int>("csrRowIndeces_heta_hu");
2830  xt::pyarray<int> &csrColumnOffsets_heta_hu =
2831  args.array<int>("csrColumnOffsets_heta_hu");
2832  xt::pyarray<int> &csrRowIndeces_heta_hv =
2833  args.array<int>("csrRowIndeces_heta_hv");
2834  xt::pyarray<int> &csrColumnOffsets_heta_hv =
2835  args.array<int>("csrColumnOffsets_heta_hv");
2836  xt::pyarray<int> &csrRowIndeces_heta_heta =
2837  args.array<int>("csrRowIndeces_heta_heta");
2838  xt::pyarray<int> &csrColumnOffsets_heta_heta =
2839  args.array<int>("csrColumnOffsets_heta_heta");
2840  xt::pyarray<int> &csrRowIndeces_heta_hw =
2841  args.array<int>("csrRowIndeces_heta_hw");
2842  xt::pyarray<int> &csrColumnOffsets_heta_hw =
2843  args.array<int>("csrColumnOffsets_heta_hw");
2844  // hw
2845  xt::pyarray<int> &csrRowIndeces_hw_h =
2846  args.array<int>("csrRowIndeces_hw_h");
2847  xt::pyarray<int> &csrColumnOffsets_hw_h =
2848  args.array<int>("csrColumnOffsets_hw_h");
2849  xt::pyarray<int> &csrRowIndeces_hw_hu =
2850  args.array<int>("csrRowIndeces_hw_hu");
2851  xt::pyarray<int> &csrColumnOffsets_hw_hu =
2852  args.array<int>("csrColumnOffsets_hw_hu");
2853  xt::pyarray<int> &csrRowIndeces_hw_hv =
2854  args.array<int>("csrRowIndeces_hw_hv");
2855  xt::pyarray<int> &csrColumnOffsets_hw_hv =
2856  args.array<int>("csrColumnOffsets_hw_hv");
2857  xt::pyarray<int> &csrRowIndeces_hw_heta =
2858  args.array<int>("csrRowIndeces_hw_heta");
2859  xt::pyarray<int> &csrColumnOffsets_hw_heta =
2860  args.array<int>("csrColumnOffsets_hw_heta");
2861  xt::pyarray<int> &csrRowIndeces_hw_hw =
2862  args.array<int>("csrRowIndeces_hw_hw");
2863  xt::pyarray<int> &csrColumnOffsets_hw_hw =
2864  args.array<int>("csrColumnOffsets_hw_hw");
2865  xt::pyarray<int> &csrRowIndeces_hbeta_hbeta =
2866  args.array<int>("csrRowIndeces_hbeta_hbeta");
2867  xt::pyarray<int> &csrColumnOffsets_hbeta_hbeta =
2868  args.array<int>("csrColumnOffsets_hbeta_hbeta");
2869  xt::pyarray<double> &globalJacobian = args.array<double>("globalJacobian");
2870  int nExteriorElementBoundaries_global =
2871  args.scalar<int>("nExteriorElementBoundaries_global");
2872  xt::pyarray<int> &exteriorElementBoundariesArray =
2873  args.array<int>("exteriorElementBoundariesArray");
2874  xt::pyarray<int> &elementBoundaryElementsArray =
2875  args.array<int>("elementBoundaryElementsArray");
2876  xt::pyarray<int> &elementBoundaryLocalElementBoundariesArray =
2877  args.array<int>("elementBoundaryLocalElementBoundariesArray");
2878  xt::pyarray<int> &isDOFBoundary_h = args.array<int>("isDOFBoundary_h");
2879  xt::pyarray<int> &isDOFBoundary_hu = args.array<int>("isDOFBoundary_hu");
2880  xt::pyarray<int> &isDOFBoundary_hv = args.array<int>("isDOFBoundary_hv");
2881  xt::pyarray<int> &isAdvectiveFluxBoundary_h =
2882  args.array<int>("isAdvectiveFluxBoundary_h");
2883  xt::pyarray<int> &isAdvectiveFluxBoundary_hu =
2884  args.array<int>("isAdvectiveFluxBoundary_hu");
2885  xt::pyarray<int> &isAdvectiveFluxBoundary_hv =
2886  args.array<int>("isAdvectiveFluxBoundary_hv");
2887  xt::pyarray<int> &isDiffusiveFluxBoundary_hu =
2888  args.array<int>("isDiffusiveFluxBoundary_hu");
2889  xt::pyarray<int> &isDiffusiveFluxBoundary_hv =
2890  args.array<int>("isDiffusiveFluxBoundary_hv");
2891  xt::pyarray<double> &ebqe_bc_h_ext = args.array<double>("ebqe_bc_h_ext");
2892  xt::pyarray<double> &ebqe_bc_flux_mass_ext =
2893  args.array<double>("ebqe_bc_flux_mass_ext");
2894  xt::pyarray<double> &ebqe_bc_flux_mom_hu_adv_ext =
2895  args.array<double>("ebqe_bc_flux_mom_hu_adv_ext");
2896  xt::pyarray<double> &ebqe_bc_flux_mom_hv_adv_ext =
2897  args.array<double>("ebqe_bc_flux_mom_hv_adv_ext");
2898  xt::pyarray<double> &ebqe_bc_hu_ext = args.array<double>("ebqe_bc_hu_ext");
2899  xt::pyarray<double> &ebqe_bc_flux_hu_diff_ext =
2900  args.array<double>("ebqe_bc_flux_hu_diff_ext");
2901  xt::pyarray<double> &ebqe_penalty_ext =
2902  args.array<double>("ebqe_penalty_ext");
2903  xt::pyarray<double> &ebqe_bc_hv_ext = args.array<double>("ebqe_bc_hv_ext");
2904  xt::pyarray<double> &ebqe_bc_flux_hv_diff_ext =
2905  args.array<double>("ebqe_bc_flux_hv_diff_ext");
2906  xt::pyarray<int> &csrColumnOffsets_eb_h_h =
2907  args.array<int>("csrColumnOffsets_eb_h_h");
2908  xt::pyarray<int> &csrColumnOffsets_eb_h_hu =
2909  args.array<int>("csrColumnOffsets_eb_h_hu");
2910  xt::pyarray<int> &csrColumnOffsets_eb_h_hv =
2911  args.array<int>("csrColumnOffsets_eb_h_hv");
2912  xt::pyarray<int> &csrColumnOffsets_eb_hu_h =
2913  args.array<int>("csrColumnOffsets_eb_hu_h");
2914  xt::pyarray<int> &csrColumnOffsets_eb_hu_hu =
2915  args.array<int>("csrColumnOffsets_eb_hu_hu");
2916  xt::pyarray<int> &csrColumnOffsets_eb_hu_hv =
2917  args.array<int>("csrColumnOffsets_eb_hu_hv");
2918  xt::pyarray<int> &csrColumnOffsets_eb_hv_h =
2919  args.array<int>("csrColumnOffsets_eb_hv_h");
2920  xt::pyarray<int> &csrColumnOffsets_eb_hv_hu =
2921  args.array<int>("csrColumnOffsets_eb_hv_hu");
2922  xt::pyarray<int> &csrColumnOffsets_eb_hv_hv =
2923  args.array<int>("csrColumnOffsets_eb_hv_hv");
2924  double dt = args.scalar<double>("dt");
2925  //
2926  // loop over elements to compute volume integrals and load them into the
2927  // element Jacobians and global Jacobian
2928  //
2929  for (int eN = 0; eN < nElements_global; eN++) {
2930  register double elementJacobian_h_h[nDOF_test_element]
2931  [nDOF_trial_element],
2932  elementJacobian_hu_hu[nDOF_test_element][nDOF_trial_element],
2933  elementJacobian_hv_hv[nDOF_test_element][nDOF_trial_element],
2934  elementJacobian_heta_heta[nDOF_test_element][nDOF_trial_element],
2935  elementJacobian_hw_hw[nDOF_test_element][nDOF_trial_element],
2936  elementJacobian_hbeta_hbeta[nDOF_test_element][nDOF_trial_element];
2937  for (int i = 0; i < nDOF_test_element; i++)
2938  for (int j = 0; j < nDOF_trial_element; j++) {
2939  elementJacobian_h_h[i][j] = 0.0;
2940  elementJacobian_hu_hu[i][j] = 0.0;
2941  elementJacobian_hv_hv[i][j] = 0.0;
2942  elementJacobian_heta_heta[i][j] = 0.0;
2943  elementJacobian_hw_hw[i][j] = 0.0;
2944  elementJacobian_hbeta_hbeta[i][j] = 0.0;
2945  }
2946  for (int k = 0; k < nQuadraturePoints_element; k++) {
2947  int eN_k = eN * nQuadraturePoints_element +
2948  k, // index to a scalar at a quadrature point
2949  eN_k_nSpace = eN_k * nSpace,
2950  eN_nDOF_trial_element =
2951  eN * nDOF_trial_element; // index to a vector at a
2952  // quadrature point
2953 
2954  // declare local storage
2955  register double jac[nSpace * nSpace], jacDet, jacInv[nSpace * nSpace],
2956  dV, h_test_dV[nDOF_test_element], vel_test_dV[nDOF_test_element], x,
2957  y, xt, yt;
2958  // get jacobian, etc for mapping reference element
2959  ck.calculateMapping_element(
2960  eN, k, mesh_dof.data(), mesh_l2g.data(), mesh_trial_ref.data(),
2961  mesh_grad_trial_ref.data(), jac, jacDet, jacInv, x, y);
2962  // get the physical integration weight
2963  dV = fabs(jacDet) * dV_ref[k];
2964  // precalculate test function products with integration weights
2965  for (int j = 0; j < nDOF_trial_element; j++) {
2966  h_test_dV[j] = h_test_ref[k * nDOF_trial_element + j] * dV;
2967  vel_test_dV[j] = vel_test_ref[k * nDOF_trial_element + j] * dV;
2968  }
2969 
2970  for (int i = 0; i < nDOF_test_element; i++) {
2971  register int i_nSpace = i * nSpace;
2972  for (int j = 0; j < nDOF_trial_element; j++) {
2973  register int j_nSpace = j * nSpace;
2974  elementJacobian_h_h[i][j] += (i == j ? 1.0 : 0.0) * h_test_dV[i];
2975  elementJacobian_hu_hu[i][j] +=
2976  (i == j ? 1.0 : 0.0) * vel_test_dV[i];
2977  elementJacobian_hv_hv[i][j] +=
2978  (i == j ? 1.0 : 0.0) * vel_test_dV[i];
2979  elementJacobian_heta_heta[i][j] +=
2980  (i == j ? 1.0 : 0.0) * vel_test_dV[i];
2981  elementJacobian_hw_hw[i][j] +=
2982  (i == j ? 1.0 : 0.0) * vel_test_dV[i];
2983  elementJacobian_hbeta_hbeta[i][j] +=
2984  (i == j ? 1.0 : 0.0) * vel_test_dV[i];
2985  } // j
2986  } // i
2987  } // k
2988  //
2989  // load element Jacobian into global Jacobian
2990  //
2991  for (int i = 0; i < nDOF_test_element; i++) {
2992  register int eN_i = eN * nDOF_test_element + i;
2993  for (int j = 0; j < nDOF_trial_element; j++) {
2994  register int eN_i_j = eN_i * nDOF_trial_element + j;
2995  globalJacobian[csrRowIndeces_h_h[eN_i] +
2996  csrColumnOffsets_h_h[eN_i_j]] +=
2997  elementJacobian_h_h[i][j];
2998  globalJacobian[csrRowIndeces_hu_hu[eN_i] +
2999  csrColumnOffsets_hu_hu[eN_i_j]] +=
3000  elementJacobian_hu_hu[i][j];
3001  globalJacobian[csrRowIndeces_hv_hv[eN_i] +
3002  csrColumnOffsets_hv_hv[eN_i_j]] +=
3003  elementJacobian_hv_hv[i][j];
3004  globalJacobian[csrRowIndeces_heta_heta[eN_i] +
3005  csrColumnOffsets_heta_heta[eN_i_j]] +=
3006  elementJacobian_heta_heta[i][j];
3007  globalJacobian[csrRowIndeces_hw_hw[eN_i] +
3008  csrColumnOffsets_hw_hw[eN_i_j]] +=
3009  elementJacobian_hw_hw[i][j];
3010  globalJacobian[csrRowIndeces_hbeta_hbeta[eN_i] +
3011  csrColumnOffsets_hbeta_hbeta[eN_i_j]] +=
3012  elementJacobian_hbeta_hbeta[i][j];
3013  } // j
3014  } // i
3015  } // elements
3016  }
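
The only difference between the quadrature loop above and the consistent assembly in calculateMassMatrix is that the trial-function value is replaced by the Kronecker delta (i == j ? 1.0 : 0.0), so each diagonal entry accumulates the integral of the test function over the element; since the shape functions sum to one, that integral equals the row sum of the consistent mass matrix, i.e. classical row-sum lumping. Only the diagonal of each element block is nonzero here; the off-diagonal CSR positions simply receive zeros. A small sketch checking this on a single P1 reference triangle (the element and the 3-point midpoint quadrature rule are illustration choices, not data taken from proteus):

#include <iostream>

int main() {
  // 3-point midpoint rule on the reference triangle (exact for quadratics);
  // weights are already scaled by the element area (1/2).
  const double qp[3][2] = {{0.5, 0.0}, {0.5, 0.5}, {0.0, 0.5}};
  const double w[3] = {1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0};

  // P1 shape functions on the reference triangle.
  auto phi = [](int i, double x, double y) {
    return i == 0 ? 1.0 - x - y : (i == 1 ? x : y);
  };

  double M[3][3] = {{0}}, L[3] = {0};
  for (int k = 0; k < 3; k++)
    for (int i = 0; i < 3; i++) {
      const double test_dV = phi(i, qp[k][0], qp[k][1]) * w[k];
      L[i] += test_dV;                                   // the (i == j) branch only
      for (int j = 0; j < 3; j++)
        M[i][j] += phi(j, qp[k][0], qp[k][1]) * test_dV; // consistent entry
    }

  for (int i = 0; i < 3; i++) {
    const double rowSum = M[i][0] + M[i][1] + M[i][2];
    std::cout << "row " << i << ": lumped = " << L[i]
              << ", consistent row sum = " << rowSum << "\n";
  }
  return 0;
}
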
3017  }; // end class GN_SW2DCV
3018 
3019 inline GN_SW2DCV_base *
3020 newGN_SW2DCV(int nSpaceIn, int nQuadraturePoints_elementIn,
3021  int nDOF_mesh_trial_elementIn, int nDOF_trial_elementIn,
3022  int nDOF_test_elementIn, int nQuadraturePoints_elementBoundaryIn,
3023  int CompKernelFlag) {
3024  return proteus::chooseAndAllocateDiscretization2D<GN_SW2DCV_base, GN_SW2DCV,
3025  CompKernel>(
3026  nSpaceIn, nQuadraturePoints_elementIn, nDOF_mesh_trial_elementIn,
3027  nDOF_trial_elementIn, nDOF_test_elementIn,
3028  nQuadraturePoints_elementBoundaryIn, CompKernelFlag);
3029 }
3030 } // end namespace proteus
3031 
3032 #endif
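
For completeness, a sketch of how the factory above might be driven from C++, assuming the proteus build environment (pybind11, xtensor-python, ModelFactory) is available. The dimension values below are placeholders chosen for illustration; in practice they are typically supplied by the Python-side model through the generated bindings, and chooseAndAllocateDiscretization2D uses them to select the CompKernel instantiation.

#include "GN_SW2DCV.h"

int main() {
  // Placeholder dimensions: P1 triangles in 2D with assumed quadrature sizes.
  const int nSpace = 2;
  const int nQuadraturePoints_element = 6;
  const int nDOF_mesh_trial_element = 3;
  const int nDOF_trial_element = 3;
  const int nDOF_test_element = 3;
  const int nQuadraturePoints_elementBoundary = 4;
  const int compKernelFlag = 0;

  proteus::GN_SW2DCV_base *model = proteus::newGN_SW2DCV(
      nSpace, nQuadraturePoints_element, nDOF_mesh_trial_element,
      nDOF_trial_element, nDOF_test_element, nQuadraturePoints_elementBoundary,
      compKernelFlag);

  // The object is used through the virtual interface declared in
  // GN_SW2DCV_base, e.g. model->calculateResidual(args) with an
  // arguments_dict assembled by the caller.
  delete model;
  return 0;
}
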