Loading...
Searching...
No Matches
epsilon_divdiv.hpp
Go to the documentation of this file.
1#pragma once
2
3#include "../../quadrature/quadrature.hpp"
5#include "dense/vec.hpp"
9#include "linalg/operator.hpp"
11#include "linalg/vector.hpp"
12#include "linalg/vector_q1.hpp"
13#include "util/timer.hpp"
14
16
17template < typename ScalarT, int VecDim = 3 >
19{
20 public:
23 using ScalarType = ScalarT;
24 static constexpr int LocalMatrixDim = 18;
27
28 private:
29 LocalMatrixStorage local_matrix_storage_;
30
32
37
38 bool treat_boundary_;
39 bool diagonal_;
40
41 linalg::OperatorApplyMode operator_apply_mode_;
42 linalg::OperatorCommunicationMode operator_communication_mode_;
43 linalg::OperatorStoredMatrixMode operator_stored_matrix_mode_;
44
47
50
51 // Quadrature points.
52 const int num_quad_points = quadrature::quad_felippa_3x2_num_quad_points;
53
56
57 public:
64 bool treat_boundary,
65 bool diagonal,
67 linalg::OperatorCommunicationMode operator_communication_mode =
70 : domain_( domain )
71 , grid_( grid )
72 , radii_( radii )
73 , mask_( mask )
74 , k_( k )
75 , treat_boundary_( treat_boundary )
76 , diagonal_( diagonal )
77 , operator_apply_mode_( operator_apply_mode )
78 , operator_communication_mode_( operator_communication_mode )
79 , operator_stored_matrix_mode_( operator_stored_matrix_mode )
80 // TODO: we can reuse the send and recv buffers and pass in from the outside somehow
81 , send_buffers_( domain )
82 , recv_buffers_( domain )
83 {
86 }
87
89 const linalg::OperatorApplyMode operator_apply_mode,
90 const linalg::OperatorCommunicationMode operator_communication_mode )
91 {
92 operator_apply_mode_ = operator_apply_mode;
93 operator_communication_mode_ = operator_communication_mode;
94 }
95
96 /// @brief S/Getter for diagonal member
97 void set_diagonal( bool v ) { diagonal_ = v; }
98
99 /// @brief Getter for coefficient
101
102 /// @brief Getter for domain member
103 const grid::shell::DistributedDomain& get_domain() const { return domain_; }
104
105 /// @brief Getter for radii member
107
108 /// @brief Getter for grid member
110
111 /// @brief Getter for mask member
112 KOKKOS_INLINE_FUNCTION
114 const int local_subdomain_id,
115 const int x_cell,
116 const int y_cell,
117 const int r_cell,
119 { return util::has_flag( mask_( local_subdomain_id, x_cell, y_cell, r_cell ), flag ); }
120
121 /// @brief allocates memory for the local matrices
123 linalg::OperatorStoredMatrixMode operator_stored_matrix_mode,
124 int level_range,
126 {
127 operator_stored_matrix_mode_ = operator_stored_matrix_mode;
128
129 // allocate storage if necessary
130 if ( operator_stored_matrix_mode_ != linalg::OperatorStoredMatrixMode::Off )
131 {
133 domain_, operator_stored_matrix_mode_, level_range, GCAElements );
134 }
135 }
136
137 linalg::OperatorStoredMatrixMode get_stored_matrix_mode() { return operator_stored_matrix_mode_; }
138
139 /// @brief Set the local matrix stored in the operator
140 KOKKOS_INLINE_FUNCTION
142 const int local_subdomain_id,
143 const int x_cell,
144 const int y_cell,
145 const int r_cell,
146 const int wedge,
148 {
149 KOKKOS_ASSERT( operator_stored_matrix_mode_ != linalg::OperatorStoredMatrixMode::Off );
150 local_matrix_storage_.set_matrix( local_subdomain_id, x_cell, y_cell, r_cell, wedge, mat );
151 }
152
 153 /// @brief Retrieves the local matrix
 154 /// if there are stored local matrices, the desired local matrix is loaded and returned
155 /// if not, the local matrix is assembled on-the-fly
156 KOKKOS_INLINE_FUNCTION
158 const int local_subdomain_id,
159 const int x_cell,
160 const int y_cell,
161 const int r_cell,
162 const int wedge ) const
163 {
164 // request from storage
165 if ( operator_stored_matrix_mode_ != linalg::OperatorStoredMatrixMode::Off )
166 {
167 if ( !local_matrix_storage_.has_matrix( local_subdomain_id, x_cell, y_cell, r_cell, wedge ) )
168 {
169 Kokkos::abort( "No matrix found at that spatial index." );
170 }
171 return local_matrix_storage_.get_matrix( local_subdomain_id, x_cell, y_cell, r_cell, wedge );
172 }
173 else
174 {
175 return assemble_local_matrix( local_subdomain_id, x_cell, y_cell, r_cell, wedge );
176 }
177 }
178
179 void apply_impl( const SrcVectorType& src, DstVectorType& dst )
180 {
181 util::Timer timer_apply( "epsilon_divdiv_apply" );
182
183 if ( operator_apply_mode_ == linalg::OperatorApplyMode::Replace )
184 {
185 assign( dst, 0 );
186 }
187
188 src_ = src.grid_data();
189 dst_ = dst.grid_data();
190
191 if ( src_.extent( 0 ) != dst_.extent( 0 ) || src_.extent( 1 ) != dst_.extent( 1 ) ||
192 src_.extent( 2 ) != dst_.extent( 2 ) || src_.extent( 3 ) != dst_.extent( 3 ) )
193 {
194 throw std::runtime_error( "EpsilonDivDiv: src/dst mismatch" );
195 }
196
197 if ( src_.extent( 1 ) != grid_.extent( 1 ) || src_.extent( 2 ) != grid_.extent( 2 ) )
198 {
199 throw std::runtime_error( "EpsilonDivDiv: src/dst mismatch" );
200 }
201
202 util::Timer timer_kernel( "epsilon_divdiv_kernel" );
203 Kokkos::parallel_for( "matvec", grid::shell::local_domain_md_range_policy_cells( domain_ ), *this );
204 Kokkos::fence();
205 timer_kernel.stop();
206
207 if ( operator_communication_mode_ == linalg::OperatorCommunicationMode::CommunicateAdditively )
208 {
209 util::Timer timer_comm( "epsilon_divdiv_comm" );
210
212 domain_, dst_, send_buffers_, recv_buffers_ );
214 }
215 }
216
217 KOKKOS_INLINE_FUNCTION void
218 operator()( const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell ) const
219 {
220 // If we have stored lmatrices, use them.
221 // It's the user's responsibility to write meaningful matrices via set_lmatrix()
222 // We probably never want to assemble lmatrices with DCA and store,
223 // so GCA should be the actor storing matrices.
224 // use stored matrices (at least on some elements)
225 if ( operator_stored_matrix_mode_ != linalg::OperatorStoredMatrixMode::Off )
226 {
228
229 if ( operator_stored_matrix_mode_ == linalg::OperatorStoredMatrixMode::Full )
230 {
231 A[0] = local_matrix_storage_.get_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 0 );
232 A[1] = local_matrix_storage_.get_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 1 );
233 }
234 else if ( operator_stored_matrix_mode_ == linalg::OperatorStoredMatrixMode::Selective )
235 {
236 if ( local_matrix_storage_.has_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 0 ) &&
237 local_matrix_storage_.has_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 1 ) )
238 {
239 A[0] = local_matrix_storage_.get_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 0 );
240 A[1] = local_matrix_storage_.get_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 1 );
241 }
242 else
243 {
244 // Kokkos::abort("Matrix not found.");
245 A[0] = assemble_local_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 0 );
246 A[1] = assemble_local_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 1 );
247 }
248 }
249 // BCs are applied by GCA ... to be discussed for free-slip
250
251 if ( diagonal_ )
252 {
253 A[0] = A[0].diagonal();
254 A[1] = A[1].diagonal();
255 }
256
258 for ( int dimj = 0; dimj < 3; dimj++ )
259 {
262 src_d, local_subdomain_id, x_cell, y_cell, r_cell, dimj, src_ );
263
264 for ( int wedge = 0; wedge < num_wedges_per_hex_cell; wedge++ )
265 {
266 for ( int i = 0; i < num_nodes_per_wedge; i++ )
267 {
268 src[wedge]( dimj * num_nodes_per_wedge + i ) = src_d[wedge]( i );
269 }
270 }
271 }
272
274
275 dst[0] = A[0] * src[0];
276 dst[1] = A[1] * src[1];
277
278 for ( int dimi = 0; dimi < 3; dimi++ )
279 {
281 dst_d[0] = dst[0].template slice< 6 >( dimi * num_nodes_per_wedge );
282 dst_d[1] = dst[1].template slice< 6 >( dimi * num_nodes_per_wedge );
283
285 dst_, local_subdomain_id, x_cell, y_cell, r_cell, dimi, dst_d );
286 }
287 }
288 else
289 {
290 // Gather surface points for each wedge.
292 wedge_surface_physical_coords( wedge_phy_surf, grid_, local_subdomain_id, x_cell, y_cell );
293
294 // Gather wedge radii.
295 const ScalarT r_1 = radii_( local_subdomain_id, r_cell );
296 const ScalarT r_2 = radii_( local_subdomain_id, r_cell + 1 );
297
299 extract_local_wedge_scalar_coefficients( k_local_hex, local_subdomain_id, x_cell, y_cell, r_cell, k_ );
300
301 constexpr int hex_offset_x[8] = { 0, 1, 0, 1, 0, 1, 0, 1 };
302 constexpr int hex_offset_y[8] = { 0, 0, 1, 1, 0, 0, 1, 1 };
303 constexpr int hex_offset_r[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };
304
305 // FE dimensions: velocity coupling components of epsilon operator
306
307 for ( int dimi = 0; dimi < 3; ++dimi )
308 {
309 for ( int dimj = 0; dimj < 3; ++dimj )
310 {
311 if ( diagonal_ and dimi != dimj )
312 continue;
313
314 ScalarType src_local_hex[8] = {};
315 ScalarType dst_local_hex[8] = {};
316
317 for ( int i = 0; i < 8; i++ )
318 {
319 src_local_hex[i] = src_(
320 local_subdomain_id,
321 x_cell + hex_offset_x[i],
322 y_cell + hex_offset_y[i],
323 r_cell + hex_offset_r[i],
324 dimi );
325 }
326
327 // spatial dimensions: quadrature points and wedge
328 for ( int q = 0; q < num_quad_points; q++ )
329 {
330 for ( int wedge = 0; wedge < num_wedges_per_hex_cell; wedge++ )
331 {
334 ScalarType jdet_keval_quadweight = 0;
335
337 wedge,
338 quad_points[q],
339 quad_weights[q],
340 r_1,
341 r_2,
342 wedge_phy_surf,
343 k_local_hex,
344 dimi,
345 dimj,
346 sym_grad_i,
347 sym_grad_j,
348 jdet_keval_quadweight );
349
351 src_local_hex,
352 dst_local_hex,
353 wedge,
354 jdet_keval_quadweight,
355 sym_grad_i,
356 sym_grad_j,
357 dimi,
358 dimj,
359 r_cell );
360 }
361 }
362
363 for ( int i = 0; i < 8; i++ )
364 {
365 Kokkos::atomic_add(
366 &dst_(
367 local_subdomain_id,
368 x_cell + hex_offset_x[i],
369 y_cell + hex_offset_y[i],
370 r_cell + hex_offset_r[i],
371 dimj ),
372 dst_local_hex[i] );
373 }
374 }
375 }
376 }
377 }
378
379 /// @brief: For both trial and test space this function sets up a vector:
380 /// each vector element holds the symmetric gradient (a 3x3 matrix) of the shape function of the corresponding dof
381 /// (if dimi == dimj, these are the same and we are on the diagonal of the vectorial diffusion operator)
382 /// Additionally, we compute the scalar factor for the numerical integral comp: determinant of the jacobian,
383 /// evaluation of the coefficient k on the element and the quadrature weight of the current quad-point.
384
385 /// The idea of this function is that the two vectors can be:
386 /// - accumulated to the result of the local matvec with 2 * num_nodes_per_wedge complexity
387 /// by scaling the dot product of the trial vec and local src dofs with each element of the test vec
388 /// (and adding to the dst dofs, this is the fused local matvec).
389 /// - propagated to the local matrix by an outer product of the two vectors
390 /// (without applying it to dofs). This is e.g. required to assemble the finest grid local
391 /// matrix on-the-fly during GCA/Galerkin coarsening.
392
393 ///
394 KOKKOS_INLINE_FUNCTION void assemble_trial_test_vecs(
395 const int wedge,
396 const dense::Vec< ScalarType, VecDim >& quad_point,
397 const ScalarType quad_weight,
398 const ScalarT r_1,
399 const ScalarT r_2,
400 dense::Vec< ScalarT, 3 > ( *wedge_phy_surf )[3],
401 const dense::Vec< ScalarT, 6 >* k_local_hex,
402 const int dimi,
403 const int dimj,
406 ScalarType& jdet_keval_quadweight ) const
407 {
408 dense::Mat< ScalarType, VecDim, VecDim > J = jac( wedge_phy_surf[wedge], r_1, r_2, quad_point );
409 const auto det = J.det();
410 const auto abs_det = Kokkos::abs( det );
411 const dense::Mat< ScalarType, VecDim, VecDim > J_inv_transposed = J.inv_transposed( det );
412
413 // dot of coeff dofs and element-local shape functions to evaluate the coefficent on the current element
414 ScalarType k_eval = 0.0;
415 for ( int k = 0; k < num_nodes_per_wedge; k++ )
416 {
417 k_eval += shape( k, quad_point ) * k_local_hex[wedge]( k );
418 }
419
420 for ( int k = 0; k < num_nodes_per_wedge; k++ )
421 {
422 sym_grad_i[k] = symmetric_grad( J_inv_transposed, quad_point, k, dimi );
423 sym_grad_j[k] = symmetric_grad( J_inv_transposed, quad_point, k, dimj );
424 }
425 jdet_keval_quadweight = quad_weight * k_eval * abs_det;
426 }
427
428 /// @brief assemble the local matrix and return it for a given element, wedge, and vectorial component
429 /// (determined by dimi, dimj)
430 KOKKOS_INLINE_FUNCTION
432 const int local_subdomain_id,
433 const int x_cell,
434 const int y_cell,
435 const int r_cell,
436 const int wedge ) const
437 {
438 // Gather surface points for each wedge.
439 // TODO gather this for only 1 wedge
441 wedge_surface_physical_coords( wedge_phy_surf, grid_, local_subdomain_id, x_cell, y_cell );
442
443 // Gather wedge radii.
444 const ScalarT r_1 = radii_( local_subdomain_id, r_cell );
445 const ScalarT r_2 = radii_( local_subdomain_id, r_cell + 1 );
446
448 extract_local_wedge_scalar_coefficients( k_local_hex, local_subdomain_id, x_cell, y_cell, r_cell, k_ );
449
450 // Compute the local element matrix.
452 for ( int dimi = 0; dimi < 3; ++dimi )
453 {
454 for ( int dimj = 0; dimj < 3; ++dimj )
455 {
456 // spatial dimensions: quadrature points and wedge
457 for ( int q = 0; q < num_quad_points; q++ )
458 {
461 ScalarType jdet_keval_quadweight = 0;
463 wedge,
464 quad_points[q],
465 quad_weights[q],
466 r_1,
467 r_2,
468 wedge_phy_surf,
469 k_local_hex,
470 dimi,
471 dimj,
472 sym_grad_i,
473 sym_grad_j,
474 jdet_keval_quadweight );
475
476 // propagate on local matrix by outer product of test and trial vecs
477 for ( int i = 0; i < num_nodes_per_wedge; i++ )
478 {
479 for ( int j = 0; j < num_nodes_per_wedge; j++ )
480 {
481 A( i + dimi * num_nodes_per_wedge, j + dimj * num_nodes_per_wedge ) +=
482 jdet_keval_quadweight *
483 ( 2 * sym_grad_j[j].double_contract( sym_grad_i[i] ) -
484 2.0 / 3.0 * sym_grad_j[j]( dimj, dimj ) * sym_grad_i[i]( dimi, dimi ) );
485 // for the div, we just extract the component from the gradient vector
486 }
487 }
488 }
489 }
490 }
491
492 if ( treat_boundary_ )
493 {
495 boundary_mask.fill( 1.0 );
496
497 for ( int dimi = 0; dimi < 3; ++dimi )
498 {
499 for ( int dimj = 0; dimj < 3; ++dimj )
500 {
501 if ( r_cell == 0 )
502 {
503 // Inner boundary (CMB).
504 for ( int i = 0; i < 6; i++ )
505 {
506 for ( int j = 0; j < 6; j++ )
507 {
508 // on diagonal components of the vectorial diffusion operator, we exclude the diagonal entries from elimination
509 if ( ( dimi == dimj && i != j && ( i < 3 || j < 3 ) ) or
510 ( dimi != dimj && ( i < 3 || j < 3 ) ) )
511 {
512 boundary_mask( i + dimi * num_nodes_per_wedge, j + dimj * num_nodes_per_wedge ) =
513 0.0;
514 }
515 }
516 }
517 }
518
519 if ( r_cell + 1 == radii_.extent( 1 ) - 1 )
520 {
521 // Outer boundary (surface).
522 for ( int i = 0; i < 6; i++ )
523 {
524 for ( int j = 0; j < 6; j++ )
525 {
526 // on diagonal components of the vectorial diffusion operator, we exclude the diagonal entries from elimination
527 if ( ( dimi == dimj && i != j && ( i >= 3 || j >= 3 ) ) or
528 ( dimi != dimj && ( i >= 3 || j >= 3 ) ) )
529 {
530 boundary_mask( i + dimi * num_nodes_per_wedge, j + dimj * num_nodes_per_wedge ) =
531 0.0;
532 }
533 }
534 }
535 }
536 }
537 }
538 A.hadamard_product( boundary_mask );
539 }
540
541 return A;
542 }
543
544 // executes the fused local matvec on an element, given the assembled trial and test vectors
545 KOKKOS_INLINE_FUNCTION void fused_local_mv(
546 ScalarType src_local_hex[8],
547 ScalarType dst_local_hex[8],
548 const int wedge,
549 const ScalarType jdet_keval_quadweight,
552 const int dimi,
553 const int dimj,
554 int r_cell ) const
555 {
556 constexpr int offset_x[2][6] = { { 0, 1, 0, 0, 1, 0 }, { 1, 0, 1, 1, 0, 1 } };
557 constexpr int offset_y[2][6] = { { 0, 0, 1, 0, 0, 1 }, { 1, 1, 0, 1, 1, 0 } };
558 constexpr int offset_r[2][6] = { { 0, 0, 0, 1, 1, 1 }, { 0, 0, 0, 1, 1, 1 } };
559
561 ScalarType divu = 0.0;
562 grad_u.fill( 0.0 );
563
564 const bool at_cmb = r_cell == 0;
565 const bool at_surface = r_cell + 1 == radii_.extent( 1 ) - 1;
566 int cmb_shift = 0;
567 int surface_shift = 0;
568
569 // Compute ∇u at this quadrature point.
570 if ( !diagonal_ )
571 {
572 if ( treat_boundary_ && at_cmb )
573 {
574 // at the core-mantle boundary, we exclude dofs that are lower-indexed than the dof on the boundary
575 cmb_shift = 3;
576 }
577 else if ( treat_boundary_ && at_surface )
578 {
579 // at the surface boundary, we exclude dofs that are higher-indexed than the dof on the boundary
580 surface_shift = 3;
581 }
582
583 // accumulate the element-local gradient/divergence of the trial function (loop over columns of local matrix/local dofs)
584 // by dot of trial vec and src dofs
585 for ( int i = 0 + cmb_shift; i < num_nodes_per_wedge - surface_shift; i++ )
586 {
587 grad_u =
588 grad_u +
589 sym_grad_i[i] * src_local_hex[4 * offset_r[wedge][i] + 2 * offset_y[wedge][i] + offset_x[wedge][i]];
590 divu += sym_grad_i[i]( dimi, dimi ) *
591 src_local_hex[4 * offset_r[wedge][i] + 2 * offset_y[wedge][i] + offset_x[wedge][i]];
592 }
593
594 // Add the test function contributions.
595 // for each row of the local matrix (test-functions):
596 // multiply trial part (fully assembled for the current element from loop above) with test part corresponding to the current row/dof
597 // += due to contributions from other elements
598 for ( int j = 0 + cmb_shift; j < num_nodes_per_wedge - surface_shift; j++ )
599 {
600 dst_local_hex[4 * offset_r[wedge][j] + 2 * offset_y[wedge][j] + offset_x[wedge][j]] +=
601 jdet_keval_quadweight * ( 2 * ( sym_grad_j[j] ).double_contract( grad_u ) -
602 2.0 / 3.0 * sym_grad_j[j]( dimj, dimj ) * divu );
603 // for the div, we just extract the component from the gradient vector
604 }
605 }
606
607 // Dirichlet DoFs are only to be eliminated on diagonal blocks of epsilon
608 if ( diagonal_ || ( dimi == dimj && ( treat_boundary_ && ( at_cmb || at_surface ) ) ) )
609 {
610 // for the diagonal elements at the boundary, we switch the shifts
611 for ( int i = 0 + surface_shift; i < num_nodes_per_wedge - cmb_shift; i++ )
612 {
613 const auto grad_u_diag =
614 sym_grad_i[i] * src_local_hex[4 * offset_r[wedge][i] + 2 * offset_y[wedge][i] + offset_x[wedge][i]];
615 const auto div_u_diag =
616 sym_grad_i[i]( dimi, dimi ) *
617 src_local_hex[4 * offset_r[wedge][i] + 2 * offset_y[wedge][i] + offset_x[wedge][i]];
618
619 dst_local_hex[4 * offset_r[wedge][i] + 2 * offset_y[wedge][i] + offset_x[wedge][i]] +=
620 jdet_keval_quadweight * ( 2 * ( sym_grad_j[i] ).double_contract( grad_u_diag ) -
621 2.0 / 3.0 * sym_grad_j[i]( dimj, dimj ) * div_u_diag );
622 }
623 }
624 }
625};
626
629
630} // namespace terra::fe::wedge::operators::shell
Send and receive buffers for all process-local subdomain boundaries.
Definition communication.hpp:56
void operator()(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell) const
Definition epsilon_divdiv.hpp:218
void set_stored_matrix_mode(linalg::OperatorStoredMatrixMode operator_stored_matrix_mode, int level_range, grid::Grid4DDataScalar< ScalarType > GCAElements)
allocates memory for the local matrices
Definition epsilon_divdiv.hpp:122
linalg::OperatorStoredMatrixMode get_stored_matrix_mode()
Definition epsilon_divdiv.hpp:137
bool has_flag(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, grid::shell::ShellBoundaryFlag flag) const
Getter for mask member.
Definition epsilon_divdiv.hpp:113
static constexpr int LocalMatrixDim
Definition epsilon_divdiv.hpp:24
void assemble_trial_test_vecs(const int wedge, const dense::Vec< ScalarType, VecDim > &quad_point, const ScalarType quad_weight, const ScalarT r_1, const ScalarT r_2, dense::Vec< ScalarT, 3 >(*wedge_phy_surf)[3], const dense::Vec< ScalarT, 6 > *k_local_hex, const int dimi, const int dimj, dense::Mat< ScalarType, VecDim, VecDim > *sym_grad_i, dense::Mat< ScalarType, VecDim, VecDim > *sym_grad_j, ScalarType &jdet_keval_quadweight) const
: For both trial and test space this function sets up a vector: each vector element holds the symmetr...
Definition epsilon_divdiv.hpp:394
ScalarT ScalarType
Definition epsilon_divdiv.hpp:23
grid::Grid2DDataScalar< ScalarT > get_radii() const
Getter for radii member.
Definition epsilon_divdiv.hpp:106
EpsilonDivDiv(const grid::shell::DistributedDomain &domain, const grid::Grid3DDataVec< ScalarT, 3 > &grid, const grid::Grid2DDataScalar< ScalarT > &radii, const grid::Grid4DDataScalar< grid::shell::ShellBoundaryFlag > &mask, const grid::Grid4DDataScalar< ScalarT > &k, bool treat_boundary, bool diagonal, linalg::OperatorApplyMode operator_apply_mode=linalg::OperatorApplyMode::Replace, linalg::OperatorCommunicationMode operator_communication_mode=linalg::OperatorCommunicationMode::CommunicateAdditively, linalg::OperatorStoredMatrixMode operator_stored_matrix_mode=linalg::OperatorStoredMatrixMode::Off)
Definition epsilon_divdiv.hpp:58
void fused_local_mv(ScalarType src_local_hex[8], ScalarType dst_local_hex[8], const int wedge, const ScalarType jdet_keval_quadweight, dense::Mat< ScalarType, 3, 3 > *sym_grad_i, dense::Mat< ScalarType, 3, 3 > *sym_grad_j, const int dimi, const int dimj, int r_cell) const
Definition epsilon_divdiv.hpp:545
void set_operator_apply_and_communication_modes(const linalg::OperatorApplyMode operator_apply_mode, const linalg::OperatorCommunicationMode operator_communication_mode)
Definition epsilon_divdiv.hpp:88
const grid::shell::DistributedDomain & get_domain() const
Getter for domain member.
Definition epsilon_divdiv.hpp:103
grid::Grid3DDataVec< ScalarT, 3 > get_grid()
Getter for grid member.
Definition epsilon_divdiv.hpp:109
void set_local_matrix(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int wedge, const dense::Mat< ScalarT, LocalMatrixDim, LocalMatrixDim > &mat) const
Set the local matrix stored in the operator.
Definition epsilon_divdiv.hpp:141
dense::Mat< ScalarT, LocalMatrixDim, LocalMatrixDim > assemble_local_matrix(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int wedge) const
assemble the local matrix and return it for a given element, wedge, and vectorial component (determin...
Definition epsilon_divdiv.hpp:431
void apply_impl(const SrcVectorType &src, DstVectorType &dst)
Definition epsilon_divdiv.hpp:179
dense::Mat< ScalarT, LocalMatrixDim, LocalMatrixDim > get_local_matrix(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int wedge) const
Retrieves the local matrix if there are stored local matrices, the desired local matrix is loaded and r...
Definition epsilon_divdiv.hpp:157
terra::grid::Grid4DDataMatrices< ScalarType, LocalMatrixDim, LocalMatrixDim, 2 > Grid4DDataLocalMatrices
Definition epsilon_divdiv.hpp:25
void set_diagonal(bool v)
Setter for the diagonal member.
Definition epsilon_divdiv.hpp:97
const grid::Grid4DDataScalar< ScalarType > & k_grid_data()
Getter for coefficient.
Definition epsilon_divdiv.hpp:100
Parallel data structure organizing the thick spherical shell metadata for distributed (MPI parallel) ...
Definition spherical_shell.hpp:2518
Static assertion: VectorQ1Scalar satisfies VectorLike concept.
Definition vector_q1.hpp:168
const grid::Grid4DDataVec< ScalarType, VecDim > & grid_data() const
Get const reference to grid data.
Definition vector_q1.hpp:288
bool has_matrix(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int wedge) const
Checks for presence of a local matrix for a certain element.
Definition local_matrix_storage.hpp:223
dense::Mat< ScalarT, LocalMatrixDim, LocalMatrixDim > get_matrix(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int wedge) const
Retrieves the local matrix if there are stored local matrices, the desired local matrix is loaded and r...
Definition local_matrix_storage.hpp:175
void set_matrix(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int wedge, dense::Mat< ScalarT, LocalMatrixDim, LocalMatrixDim > mat) const
Set the local matrix stored in the operator.
Definition local_matrix_storage.hpp:118
Timer supporting RAII scope or manual stop.
Definition timer.hpp:342
void stop()
Stop the timer and record elapsed time.
Definition timer.hpp:364
Concept for types that can be used as Galerkin coarse-grid operators in a multigrid hierarchy....
Definition operator.hpp:81
void unpack_and_reduce_local_subdomain_boundaries(const grid::shell::DistributedDomain &domain, const GridDataType &data, SubdomainNeighborhoodSendRecvBuffer< typename GridDataType::value_type, grid::grid_data_vec_dim< GridDataType >() > &boundary_recv_buffers, CommunicationReduction reduction=CommunicationReduction::SUM)
Unpacks and reduces local subdomain boundaries.
Definition communication.hpp:672
void pack_send_and_recv_local_subdomain_boundaries(const grid::shell::DistributedDomain &domain, const GridDataType &data, SubdomainNeighborhoodSendRecvBuffer< typename GridDataType::value_type, grid::grid_data_vec_dim< GridDataType >() > &boundary_send_buffers, SubdomainNeighborhoodSendRecvBuffer< typename GridDataType::value_type, grid::grid_data_vec_dim< GridDataType >() > &boundary_recv_buffers)
Packs, sends and recvs local subdomain boundaries using two sets of buffers.
Definition communication.hpp:242
Definition boundary_mass.hpp:14
constexpr void quad_felippa_3x2_quad_weights(T(&quad_weights)[quad_felippa_3x2_num_quad_points])
Definition wedge/quadrature/quadrature.hpp:93
constexpr int quad_felippa_3x2_num_quad_points
Definition wedge/quadrature/quadrature.hpp:66
constexpr void quad_felippa_3x2_quad_points(dense::Vec< T, 3 >(&quad_points)[quad_felippa_3x2_num_quad_points])
Definition wedge/quadrature/quadrature.hpp:70
constexpr int num_nodes_per_wedge_surface
Definition kernel_helpers.hpp:6
constexpr dense::Mat< T, 3, 3 > symmetric_grad(const dense::Mat< T, 3, 3 > &J_inv_transposed, const dense::Vec< T, 3 > &quad_point, const int dof, const int dim)
Returns the symmetric gradient of the shape function of a dof at a quadrature point.
Definition integrands.hpp:685
void wedge_surface_physical_coords(dense::Vec< T, 3 >(&wedge_surf_phy_coords)[num_wedges_per_hex_cell][num_nodes_per_wedge_surface], const grid::Grid3DDataVec< T, 3 > &lateral_grid, const int local_subdomain_id, const int x_cell, const int y_cell)
Extracts the (unit sphere) surface vertex coords of the two wedges of a hex cell.
Definition kernel_helpers.hpp:26
void atomically_add_local_wedge_vector_coefficients(const grid::Grid4DDataVec< T, VecDim > &global_coefficients, const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int d, const dense::Vec< T, 6 > local_coefficients[2])
Performs an atomic add of the two local wedge coefficient vectors of a hex cell into the global coeff...
Definition kernel_helpers.hpp:465
constexpr int num_wedges_per_hex_cell
Definition kernel_helpers.hpp:5
void extract_local_wedge_scalar_coefficients(dense::Vec< T, 6 >(&local_coefficients)[2], const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const grid::Grid4DDataScalar< T > &global_coefficients)
Extracts the local vector coefficients for the two wedges of a hex cell from the global coefficient v...
Definition kernel_helpers.hpp:306
void extract_local_wedge_vector_coefficients(dense::Vec< T, 6 >(&local_coefficients)[2], const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int d, const grid::Grid4DDataVec< T, VecDim > &global_coefficients)
Extracts the local vector coefficients for the two wedges of a hex cell from the global coefficient v...
Definition kernel_helpers.hpp:356
constexpr int num_nodes_per_wedge
Definition kernel_helpers.hpp:7
constexpr T shape(const int node_idx, const T xi, const T eta, const T zeta)
(Tensor-product) Shape function.
Definition integrands.hpp:146
constexpr dense::Mat< T, 3, 3 > jac(const dense::Vec< T, 3 > &p1_phy, const dense::Vec< T, 3 > &p2_phy, const dense::Vec< T, 3 > &p3_phy, const T r_1, const T r_2, const T xi, const T eta, const T zeta)
Definition integrands.hpp:657
ShellBoundaryFlag
FlagLike that indicates boundary types for the thick spherical shell.
Definition shell/bit_masks.hpp:12
Kokkos::MDRangePolicy< Kokkos::Rank< 4 > > local_domain_md_range_policy_cells(const DistributedDomain &distributed_domain)
Definition spherical_shell.hpp:2739
Kokkos::View< dense::Mat< ScalarType, Rows, Cols > ****[NumMatrices], Layout > Grid4DDataMatrices
Definition grid_types.hpp:173
Kokkos::View< ScalarType ***[VecDim], Layout > Grid3DDataVec
Definition grid_types.hpp:42
Kokkos::View< ScalarType ****, Layout > Grid4DDataScalar
Definition grid_types.hpp:27
Kokkos::View< ScalarType **, Layout > Grid2DDataScalar
Definition grid_types.hpp:21
OperatorApplyMode
Modes for applying an operator to a vector.
Definition operator.hpp:30
@ Replace
Overwrite the destination vector.
OperatorStoredMatrixMode
Modes for applying stored matrices.
Definition operator.hpp:47
@ Selective
Use stored matrices on selected, marked elements only, assemble on all others.
@ Full
Use stored matrices on all elements.
@ Off
Do not use stored matrices.
OperatorCommunicationMode
Modes for communication during operator application.
Definition operator.hpp:40
@ CommunicateAdditively
Communicate and add results.
constexpr bool has_flag(E mask_value, E flag) noexcept
Checks if a bitmask value contains a specific flag.
Definition bit_masking.hpp:43
Definition mat.hpp:10
void fill(const T value)
Definition mat.hpp:201
constexpr Mat diagonal() const
Definition mat.hpp:377
Mat & hadamard_product(const Mat &mat)
Definition mat.hpp:213
T double_contract(const Mat &mat)
Definition mat.hpp:226
SoA (Structure-of-Arrays) 4D vector grid data.
Definition grid_types.hpp:51
auto extent(int i) const
Definition grid_types.hpp:75