Loading...
Searching...
No Matches
inv_rho_drho_dt.hpp
Go to the documentation of this file.
1#pragma once
2
3#include "../../quadrature/quadrature.hpp"
9#include "linalg/operator.hpp"
10#include "linalg/vector_q1.hpp"
11
12namespace terra::linalg {
14enum class OperatorApplyMode;
15}
17
18/// \brief Linear form for the PDA temporal compressibility term in compressible Stokes.
19///
20/// Given scalar FE functions \f$\rho\f$ (density) and \f$\dot\rho\f$ (its previously computed
21/// time derivative), this linear form evaluates
22/// \f[
23/// f_i = \int_\Omega \frac{1}{\rho} \dot\rho \, \phi_i \, \mathrm{d}x
24/// \f]
25/// into a scalar finite element coefficient vector, where \f$\phi_i\f$ are the scalar Q1 test
26/// functions on the spherical shell mesh.
27///
28/// This is the temporal part of the right-hand side of the mass conservation equation in the
29/// Projected Density Approximation (PDA) for compressible Stokes flow. The time derivative
30/// \f$\dot\rho\f$ is expected to be pre-computed (e.g. via a first- or second-order BDF
31/// scheme) and stored as a scalar FE coefficient vector before calling `apply`. See the
32/// [Stokes documentation](@ref stokes-compressible) for the full context.
33///
34/// \note The sign convention at the call site depends on the (2,1) block of the Stokes operator.
35/// The `Divergence` block computes \f$-(q, \mathrm{div}\, u)\f$, so the mass conservation
36/// equation \f$-(q, \mathrm{div}\, u) = f_p\f$ requires the
37/// \f$f_p = +\frac{1}{\rho}\dot\rho\f$ term to enter with a positive sign.
38///
39/// \note \f$\rho\f$ must not vanish in the domain; no singular-value protection is applied.
40///
41/// **Concept.** This class satisfies \ref terra::linalg::LinearFormLike. Evaluation writes
42/// the assembled coefficient vector into \p dst via `linalg::apply`:
43///
44/// \code{.cpp}
45/// InvRhoDrhoDt< double > L( domain, grid, radii, rho, drho_dt );
46/// linalg::VectorQ1Scalar< double > g( domain );
47/// linalg::apply( L, g ); // fills g_i = ∫ (1/ρ) ρ̇ φ_i dx
48/// \endcode
49///
50/// The default `OperatorApplyMode::Replace` zeroes \p dst before accumulation. Pass
51/// `OperatorApplyMode::Add` to add into an existing vector instead.
52///
53template < typename ScalarT >
// NOTE(review): the class-head line (inner line 54) was dropped by the documentation
// extractor; the cross-reference text on this page confirms the type is
// `InvRhoDrhoDt` (definition inv_rho_drho_dt.hpp:55).
55{
56 public:
// Scalar type used for all coefficients, quadrature weights and geometry.
58 using ScalarType = ScalarT;
59
60 private:
// NOTE(review): the data-member declarations at inner lines 61-68 and 72-79
// (domain_, grid_, radii_, rho_, drho_dt_, send_buffers_, recv_buffers_, and the
// dst_/rho_grid_/drho_dt_grid_ Kokkos views) are elided in this extracted
// listing — their existence is inferred from the constructor initializer list
// and from apply_impl() below; verify against the original header.
62
65
68
// Replace: zero dst before accumulating; Add: accumulate into existing values.
69 linalg::OperatorApplyMode operator_apply_mode_;
// CommunicateAdditively: sum subdomain-boundary contributions after the kernel.
70 linalg::OperatorCommunicationMode operator_communication_mode_;
71
74
75 // Kokkos views set in apply_impl() before the parallel launch.
79
80 public:
// NOTE(review): the constructor signature (inner lines 81-87, 89) is elided in
// this listing; the full signature is shown in this page's cross-reference
// text: InvRhoDrhoDt( domain, grid, radii, rho, drho_dt,
//                     operator_apply_mode = Replace,
//                     operator_communication_mode = CommunicateAdditively ).
88 const linalg::OperatorCommunicationMode operator_communication_mode =
90 : domain_( domain )
91 , grid_( grid )
92 , radii_( radii )
93 , rho_( rho )
94 , drho_dt_( drho_dt )
95 , operator_apply_mode_( operator_apply_mode )
96 , operator_communication_mode_( operator_communication_mode )
97 , send_buffers_( domain )
98 , recv_buffers_( domain )
99 {}
100
// Assembles the linear form into the destination vector: optionally zeroes it,
// launches the per-cell Kokkos kernel (operator() below), then optionally sums
// subdomain-boundary contributions across ranks.
// NOTE(review): the signature line (inner line 101) is elided here; the
// cross-reference text gives it as `void apply_impl( DstVectorType& dst )`.
102 {
103 if ( operator_apply_mode_ == linalg::OperatorApplyMode::Replace )
104 {
105 assign( dst, 0 );
106 }
107
// Cache raw grid views in members so the device kernel, which captures *this
// by value via the functor launch below, can access them.
108 dst_ = dst.grid_data();
109 rho_grid_ = rho_.grid_data();
110 drho_dt_grid_ = drho_dt_.grid_data();
111
// One kernel invocation per hex cell of the local domain (4D MDRange:
// subdomain x x-cell x y-cell x r-cell).
112 Kokkos::parallel_for(
113 "inv_rho_drho_dt", grid::shell::local_domain_md_range_policy_cells( domain_ ), *this );
// Ensure the kernel has completed before boundary data is packed/communicated.
114 Kokkos::fence();
115
116 if ( operator_communication_mode_ == linalg::OperatorCommunicationMode::CommunicateAdditively )
117 {
// NOTE(review): the call names at inner lines 118/120 are elided; per this
// page's cross-reference text these are
// communication::pack_send_and_recv_local_subdomain_boundaries(...) followed by
// communication::unpack_and_reduce_local_subdomain_boundaries(...) (SUM
// reduction), both taking the argument list shown below — verify in the header.
119 domain_, dst_, send_buffers_, recv_buffers_ );
121 }
122 }
123
124 /// \brief Kokkos kernel: per-cell contribution to
125 /// \f$ f_i = \int_E \frac{1}{\rho} \dot\rho \, \phi_i \, \mathrm{d}x \f$.
126 KOKKOS_INLINE_FUNCTION void
127 operator()( const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell ) const
128 {
129 // -----------------------------------------------------------------------
130 // Geometry
131 // -----------------------------------------------------------------------
// NOTE(review): the declaration of `wedge_phy_surf` (inner line 132) is elided;
// per wedge_surface_physical_coords' signature it is
// dense::Vec<ScalarT,3> wedge_phy_surf[num_wedges_per_hex_cell][num_nodes_per_wedge_surface].
133 wedge_surface_physical_coords( wedge_phy_surf, grid_, local_subdomain_id, x_cell, y_cell );
134
// Inner and outer radii bounding this cell's radial layer.
135 const ScalarT r_1 = radii_( local_subdomain_id, r_cell );
136 const ScalarT r_2 = radii_( local_subdomain_id, r_cell + 1 );
137
138 // -----------------------------------------------------------------------
139 // Quadrature
140 // -----------------------------------------------------------------------
141 constexpr auto num_quad_points = quadrature::quad_felippa_3x2_num_quad_points;
142
143 dense::Vec< ScalarT, 3 > quad_points[num_quad_points];
144 ScalarT quad_weights[num_quad_points];
145
// NOTE(review): the calls filling the two arrays above (inner lines 146-147,
// quadrature::quad_felippa_3x2_quad_points / _quad_weights) are elided here.
148
149 // -----------------------------------------------------------------------
150 // Extract local coefficients: rho and drho_dt (both scalar)
151 // -----------------------------------------------------------------------
// NOTE(review): declarations of `rho_coeffs` / `drho_dt_coeffs` (inner lines
// 152-153) are elided; per extract_local_wedge_scalar_coefficients' signature
// each is dense::Vec<ScalarT,6>[2] (one 6-node vector per wedge).
154
156 rho_coeffs, local_subdomain_id, x_cell, y_cell, r_cell, rho_grid_ );
158 drho_dt_coeffs, local_subdomain_id, x_cell, y_cell, r_cell, drho_dt_grid_ );
159
160 // -----------------------------------------------------------------------
161 // Per-wedge local contributions
162 // -----------------------------------------------------------------------
// NOTE(review): the declaration of `contrib` (inner line 163) is elided; per
// atomically_add_local_wedge_scalar_coefficients' signature it is
// dense::Vec<ScalarT,6>[2], presumably zero-initialized — verify in the header.
164
165 for ( int q = 0; q < num_quad_points; q++ )
166 {
167 const ScalarT w = quad_weights[q];
168
169 for ( int wedge = 0; wedge < num_wedges_per_hex_cell; wedge++ )
170 {
// Jacobian of the reference-wedge -> physical-shell map at this quad point;
// |det J| is the volume scaling for the integral.
171 const auto J = jac( wedge_phy_surf[wedge], r_1, r_2, quad_points[q] );
172 const auto det = Kokkos::abs( J.det() );
173
174 // ----------------------------------------------------------------
175 // Interpolate rho and drho_dt at quad pt
176 // ----------------------------------------------------------------
177 ScalarT rho_q = 0;
178 ScalarT drho_dt_q = 0;
179
180 for ( int j = 0; j < num_nodes_per_wedge; j++ )
181 {
182 const ScalarT phi_j = shape( j, quad_points[q] );
183 rho_q += rho_coeffs[wedge]( j ) * phi_j;
184 drho_dt_q += drho_dt_coeffs[wedge]( j ) * phi_j;
185 }
186
187 // ----------------------------------------------------------------
188 // Integrand: (1/rho) * drho_dt
189 // ----------------------------------------------------------------
// No guard against rho_q == 0: per the class docs, rho must not vanish.
190 const ScalarT integrand = drho_dt_q / rho_q;
191
192 // ----------------------------------------------------------------
193 // Accumulate test-function contributions
194 // ----------------------------------------------------------------
195 for ( int i = 0; i < num_nodes_per_wedge; i++ )
196 {
197 contrib[wedge]( i ) += w * integrand * shape( i, quad_points[q] ) * det;
198 }
199 }
200 }
201
// Atomic scatter: neighboring cells share nodes, and cells are processed in
// parallel, so the add into the global coefficient view must be atomic.
202 atomically_add_local_wedge_scalar_coefficients( dst_, local_subdomain_id, x_cell, y_cell, r_cell, contrib );
203 }
204};
205
207
208} // namespace terra::fe::wedge::linearforms::shell
Linear form for the PDA temporal compressibility term in compressible Stokes.
Definition inv_rho_drho_dt.hpp:55
void apply_impl(DstVectorType &dst)
Definition inv_rho_drho_dt.hpp:101
void operator()(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell) const
Kokkos kernel: per-cell contribution to \f$ f_i = \int_E \frac{1}{\rho} \dot\rho \, \phi_i \, \mathrm{d}x \f$.
Definition inv_rho_drho_dt.hpp:127
ScalarT ScalarType
Definition inv_rho_drho_dt.hpp:58
InvRhoDrhoDt(const grid::shell::DistributedDomain &domain, const grid::Grid3DDataVec< ScalarT, 3 > &grid, const grid::Grid2DDataScalar< ScalarT > &radii, const linalg::VectorQ1Scalar< ScalarT > &rho, const linalg::VectorQ1Scalar< ScalarT > &drho_dt, const linalg::OperatorApplyMode operator_apply_mode=linalg::OperatorApplyMode::Replace, const linalg::OperatorCommunicationMode operator_communication_mode=linalg::OperatorCommunicationMode::CommunicateAdditively)
Definition inv_rho_drho_dt.hpp:81
Parallel data structure organizing the thick spherical shell metadata for distributed (MPI parallel) ...
Definition spherical_shell.hpp:2518
Q1 scalar finite element vector on a distributed shell grid.
Definition vector_q1.hpp:21
const grid::Grid4DDataScalar< ScalarType > & grid_data() const
Get const reference to grid data.
Definition vector_q1.hpp:139
Concept for types that behave like linear forms.
Definition linear_form.hpp:22
void unpack_and_reduce_local_subdomain_boundaries(const grid::shell::DistributedDomain &domain, const GridDataType &data, SubdomainNeighborhoodSendRecvBuffer< typename GridDataType::value_type, grid::grid_data_vec_dim< GridDataType >() > &boundary_recv_buffers, CommunicationReduction reduction=CommunicationReduction::SUM)
Unpacks and reduces local subdomain boundaries.
Definition communication.hpp:672
void pack_send_and_recv_local_subdomain_boundaries(const grid::shell::DistributedDomain &domain, const GridDataType &data, SubdomainNeighborhoodSendRecvBuffer< typename GridDataType::value_type, grid::grid_data_vec_dim< GridDataType >() > &boundary_send_buffers, SubdomainNeighborhoodSendRecvBuffer< typename GridDataType::value_type, grid::grid_data_vec_dim< GridDataType >() > &boundary_recv_buffers)
Packs, sends and recvs local subdomain boundaries using two sets of buffers.
Definition communication.hpp:242
Definition inv_rho_drho_dt.hpp:16
constexpr void quad_felippa_3x2_quad_weights(T(&quad_weights)[quad_felippa_3x2_num_quad_points])
Definition wedge/quadrature/quadrature.hpp:93
constexpr int quad_felippa_3x2_num_quad_points
Definition wedge/quadrature/quadrature.hpp:66
constexpr void quad_felippa_3x2_quad_points(dense::Vec< T, 3 >(&quad_points)[quad_felippa_3x2_num_quad_points])
Definition wedge/quadrature/quadrature.hpp:70
constexpr int num_nodes_per_wedge_surface
Definition kernel_helpers.hpp:6
void atomically_add_local_wedge_scalar_coefficients(const grid::Grid4DDataScalar< T > &global_coefficients, const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const dense::Vec< T, 6 >(&local_coefficients)[2])
Performs an atomic add of the two local wedge coefficient vectors of a hex cell into the global coeff...
Definition kernel_helpers.hpp:407
void wedge_surface_physical_coords(dense::Vec< T, 3 >(&wedge_surf_phy_coords)[num_wedges_per_hex_cell][num_nodes_per_wedge_surface], const grid::Grid3DDataVec< T, 3 > &lateral_grid, const int local_subdomain_id, const int x_cell, const int y_cell)
Extracts the (unit sphere) surface vertex coords of the two wedges of a hex cell.
Definition kernel_helpers.hpp:26
constexpr int num_wedges_per_hex_cell
Definition kernel_helpers.hpp:5
void extract_local_wedge_scalar_coefficients(dense::Vec< T, 6 >(&local_coefficients)[2], const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const grid::Grid4DDataScalar< T > &global_coefficients)
Extracts the local vector coefficients for the two wedges of a hex cell from the global coefficient v...
Definition kernel_helpers.hpp:306
constexpr int num_nodes_per_wedge
Definition kernel_helpers.hpp:7
constexpr T shape(const int node_idx, const T xi, const T eta, const T zeta)
(Tensor-product) Shape function.
Definition integrands.hpp:146
constexpr dense::Mat< T, 3, 3 > jac(const dense::Vec< T, 3 > &p1_phy, const dense::Vec< T, 3 > &p2_phy, const dense::Vec< T, 3 > &p3_phy, const T r_1, const T r_2, const T xi, const T eta, const T zeta)
Definition integrands.hpp:657
Kokkos::MDRangePolicy< Kokkos::Rank< 4 > > local_domain_md_range_policy_cells(const DistributedDomain &distributed_domain)
Definition spherical_shell.hpp:2739
Kokkos::View< ScalarType ***[VecDim], Layout > Grid3DDataVec
Definition grid_types.hpp:42
Kokkos::View< ScalarType ****, Layout > Grid4DDataScalar
Definition grid_types.hpp:27
Kokkos::View< ScalarType **, Layout > Grid2DDataScalar
Definition grid_types.hpp:21
Contains linear algebra utilities and functions for the Terra project.
Definition inv_rho_drho_dt.hpp:12
OperatorApplyMode
Modes for applying an operator to a vector.
Definition operator.hpp:30
@ Replace
Overwrite the destination vector.
OperatorCommunicationMode
Modes for communication during operator application.
Definition operator.hpp:40
@ CommunicateAdditively
Communicate and add results.