Loading...
Searching...
No Matches
divergence.hpp
Go to the documentation of this file.
1
2#pragma once
3
4#include "../../quadrature/quadrature.hpp"
7#include "dense/vec.hpp"
11#include "linalg/operator.hpp"
13#include "linalg/vector.hpp"
14#include "linalg/vector_q1.hpp"
15#include "util/timer.hpp"
16
18
29
30template < typename ScalarT >
32{
33 public:
36 using ScalarType = ScalarT;
37
38 private:
40 grid::shell::DistributedDomain domain_coarse_;
41
45
46 BoundaryConditions bcs_;
47
48 linalg::OperatorApplyMode operator_apply_mode_;
49 linalg::OperatorCommunicationMode operator_communication_mode_;
50
53
56
57 public:
59 const grid::shell::DistributedDomain& domain_fine,
60 const grid::shell::DistributedDomain& domain_coarse,
61 const grid::Grid3DDataVec< ScalarT, 3 >& grid_fine,
62 const grid::Grid2DDataScalar< ScalarT >& radii_fine,
64 BoundaryConditions bcs,
66 linalg::OperatorCommunicationMode operator_communication_mode =
// NOTE(review): the constructor name line, the boundary-mask parameter, and the default
// argument values are on lines lost in this extraction — the full signature is reproduced
// in the tooltip text at the bottom of this page; verify against the repository.
68 : domain_fine_( domain_fine )
69 , domain_coarse_( domain_coarse )
70 , grid_fine_( grid_fine )
71 , radii_( radii_fine )
72 , boundary_mask_fine_( boundary_mask_fine )
73 , operator_apply_mode_( operator_apply_mode )
74 , operator_communication_mode_( operator_communication_mode )
// recv_buffers_ / comm_plan_ are built on the coarse domain: apply_impl() communicates
// the coarse-side result (dst_) with this plan.
75 , recv_buffers_( domain_coarse )
76 , comm_plan_( domain_coarse )
77 {
// BoundaryConditions is a raw array type (BoundaryConditionMapping[2]) and cannot be
// assigned as a whole — copy the two entries element-wise.
78 bcs_[0] = bcs[0];
79 bcs_[1] = bcs[1];
80 }
81
83 const linalg::OperatorApplyMode operator_apply_mode,
84 const linalg::OperatorCommunicationMode operator_communication_mode )
85 {
// Reconfigure at runtime whether apply_impl() overwrites dst (Replace mode) or
// accumulates into it, and whether it communicates boundary contributions afterwards.
86 operator_apply_mode_ = operator_apply_mode;
87 operator_communication_mode_ = operator_communication_mode;
88 }
89
/// Applies the operator: launches the cell-wise kernel (operator()) over the local fine
/// domain and, if configured, sums subdomain-boundary contributions of the coarse result.
90 void apply_impl( const SrcVectorType& src, DstVectorType& dst )
91 {
92 util::Timer timer_apply( "divergence_apply" );
93
// In Replace mode the destination is zeroed first; otherwise the kernel's atomic adds
// accumulate on top of whatever dst already holds.
94 if ( operator_apply_mode_ == linalg::OperatorApplyMode::Replace )
95 {
96 assign( dst, 0 );
97 }
98
// Cache the raw grid views so the device kernel (this functor's operator()) can use them.
99 src_ = src.grid_data();
100 dst_ = dst.grid_data();
101
102 util::Timer timer_kernel( "divergence_kernel" );
// *this is the functor: one invocation per (subdomain, x, y, r) fine cell.
103 Kokkos::parallel_for( "matvec", grid::shell::local_domain_md_range_policy_cells( domain_fine_ ), *this );
104 Kokkos::fence();
105 timer_kernel.stop();
106
// Sum the contributions that neighboring subdomains computed for shared coarse dofs
// (send_recv_with_plan defaults to a SUM reduction).
107 if ( operator_communication_mode_ == linalg::OperatorCommunicationMode::CommunicateAdditively )
108 {
109 util::Timer timer_comm( "divergence_comm" );
110 terra::communication::shell::send_recv_with_plan( comm_plan_, dst_, recv_buffers_ );
111 }
112 }
113
/// Device kernel for one fine hex cell: assembles the two wedge-local divergence matrices
/// (coarse pressure test functions x fine velocity trial functions) and applies them.
114 KOKKOS_INLINE_FUNCTION void
115 operator()( const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell ) const
116 {
117 // Gather surface points for each wedge.
// NOTE(review): the declaration of wedge_phy_surf (and several other locals used below)
// is on lines lost in this extraction — verify names/types against the repository.
119 wedge_surface_physical_coords( wedge_phy_surf, grid_fine_, local_subdomain_id, x_cell, y_cell );
120
121 // Gather wedge radii.
// r_1 / r_2: radii of the cell's lower (r_cell) and upper (r_cell + 1) radial layers.
122 const ScalarT r_1 = radii_( local_subdomain_id, r_cell );
123 const ScalarT r_2 = radii_( local_subdomain_id, r_cell + 1 );
124
125 // Quadrature points.
126 constexpr auto num_quad_points = quadrature::quad_felippa_3x2_num_quad_points;
127
128 dense::Vec< ScalarT, 3 > quad_points[num_quad_points];
129 ScalarT quad_weights[num_quad_points];
130
// NOTE(review): quad_points / quad_weights are presumably filled here via
// quad_felippa_3x2_quad_points() / _weights() — the call lines were lost in this extraction.
133
// Each fine cell is one of the 2x2x2 children of a coarse cell (see the x/2, y/2, r/2
// scatter at the end); the radial child index selects the coarse shape functions.
134 const int fine_radial_wedge_index = r_cell % 2;
135
136 // Compute the local element matrix.
138
// Quadrature loop: for each wedge accumulate
//   A[wedge](i, d*6 + j) += w_q * ( -(J^{-T} grad phi_j)_d * phi_i * |det J| ),
// a (coarse test function i) x (fine trial function j, velocity component d) block.
139 for ( int q = 0; q < num_quad_points; q++ )
140 {
141 for ( int wedge = 0; wedge < num_wedges_per_hex_cell; wedge++ )
142 {
// Index of this fine wedge within its parent coarse wedge (lateral refinement pattern).
143 const int fine_lateral_wedge_index = fine_lateral_wedge_idx( x_cell, y_cell, wedge );
144
// Jacobian of the wedge mapping at this quadrature point; J^{-T} maps reference-space
// gradients to physical gradients.
145 const auto J = jac( wedge_phy_surf[wedge], r_1, r_2, quad_points[q] );
146 const auto det = Kokkos::abs( J.det() );
147 const auto J_inv_transposed = J.inv().transposed();
148
149 for ( int i = 0; i < num_nodes_per_wedge; i++ )
150 {
// Coarse-grid (test) shape function evaluated at the fine-cell quadrature point.
151 const auto shape_i =
152 shape_coarse( i, fine_radial_wedge_index, fine_lateral_wedge_index, quad_points[q] );
153
154 for ( int j = 0; j < num_nodes_per_wedge; j++ )
155 {
156 const auto grad_j = grad_shape( j, quad_points[q] );
157
// Column layout of A is component-major: column = d * 6 + node.
158 for ( int d = 0; d < 3; d++ )
159 {
160 A[wedge]( i, d * 6 + j ) +=
161 quad_weights[q] * ( -( J_inv_transposed * grad_j )(d) *shape_i * det );
162 }
163 }
164 }
165 }
166 }
167
// Boundary flags: the cell touches the CMB if its lower face (r_cell) carries the CMB
// flag, and the outer surface if its upper face (r_cell + 1) carries the SURFACE flag.
168 bool at_cmb = util::has_flag( boundary_mask_fine_( local_subdomain_id, x_cell, y_cell, r_cell ), CMB );
169 bool at_surface =
170 util::has_flag( boundary_mask_fine_( local_subdomain_id, x_cell, y_cell, r_cell + 1 ), SURFACE );
171
// Gather the fine-grid velocity coefficients component by component and interleave them
// into the wedge-local source vectors using the same d * 6 + node layout as A's columns.
173 for ( int d = 0; d < 3; d++ )
174 {
176 extract_local_wedge_vector_coefficients( src_d, local_subdomain_id, x_cell, y_cell, r_cell, d, src_ );
177
178 for ( int wedge = 0; wedge < num_wedges_per_hex_cell; wedge++ )
179 {
180 for ( int i = 0; i < num_nodes_per_wedge; i++ )
181 {
182 src[wedge]( d * 6 + i ) = src_d[wedge]( i );
183 }
184 }
185 }
186
187 // Boundary treatment
// Multiplicative mask over the 6x18 local matrix; 1 keeps an entry, 0 eliminates it.
188 dense::Mat< ScalarT, 6, 18 > boundary_mask;
189 boundary_mask.fill( 1.0 );
190
192
193 if ( at_cmb || at_surface )
194 {
195 // Determine which boundary the cell touches (CMB or surface) and its configured condition.
196 ShellBoundaryFlag sbf = at_cmb ? CMB : SURFACE;
197 BoundaryConditionFlag bcf = get_boundary_condition_flag( bcs_, sbf );
198
199 if ( bcf == DIRICHLET )
200 {
// Dirichlet: zero every column belonging to a boundary node (nodes 0-2 form the lower
// wedge face at the CMB, nodes 3-5 the upper face at the surface) so the constrained
// velocity dofs contribute nothing.
201 for ( int dimj = 0; dimj < 3; ++dimj )
202 {
203 for ( int i = 0; i < num_nodes_per_wedge; i++ )
204 {
205 for ( int j = 0; j < num_nodes_per_wedge; j++ )
206 {
207 if ( ( at_cmb && ( j < 3 ) ) || ( at_surface && ( j >= 3 ) ) )
208 {
209 boundary_mask( i, dimj * num_nodes_per_wedge + j ) = 0.0;
210 }
211 }
212 }
213 }
214 }
215 else if ( bcf == FREESLIP )
216 {
218
// Free-slip: only the velocity component normal to the boundary is constrained, so the
// local system is rotated into a normal/tangential basis per boundary node below.
// NOTE(review): declarations of A_tmp / R (and any reorder of src) are on lines lost in
// this extraction — verify against the repository.
219 // reorder source dofs for nodes instead of velocity dims in src vector and local matrix
// Repack columns from component-major (node + dim * 6) to node-major (node * 3 + dim) so
// the 3 dofs of one node are contiguous and can be rotated by 3x3 blocks.
220 for ( int wedge = 0; wedge < 2; ++wedge )
221 {
222 for ( int node_idxi = 0; node_idxi < num_nodes_per_wedge; node_idxi++ )
223 {
224 for ( int dimj = 0; dimj < 3; ++dimj )
225 {
226 for ( int node_idxj = 0; node_idxj < num_nodes_per_wedge; node_idxj++ )
227 {
228 A_tmp[wedge]( node_idxi, node_idxj * 3 + dimj ) =
229 A[wedge]( node_idxi, node_idxj + dimj * num_nodes_per_wedge );
230 }
231 }
232 }
234 }
235
236 // assemble rotation matrices for boundary nodes
237 // e.g. if we are at CMB, we need to rotate DoFs 0, 1, 2 of each wedge
238 // at SURFACE, we need to rotate DoFs 3, 4, 5
239
// Lateral (x, y) offsets from the hex-cell origin to the three surface nodes of each of
// the two wedges — used to address the grid point whose boundary normal is needed.
240 constexpr int layer_hex_offset_x[2][3] = { { 0, 1, 0 }, { 1, 0, 1 } };
241 constexpr int layer_hex_offset_y[2][3] = { { 0, 0, 1 }, { 1, 1, 0 } };
242
243 for ( int wedge = 0; wedge < 2; ++wedge )
244 {
245 // make rotation matrix unity
// Start from the 18x18 identity; only the 3x3 blocks of boundary nodes are replaced.
246 for ( int i = 0; i < 18; ++i )
247 {
248 R[wedge]( i, i ) = 1.0;
249 }
250
251 for ( int boundary_node_idx = 0; boundary_node_idx < 3; boundary_node_idx++ )
252 {
253 // compute normal
// NOTE(review): the call computing `normal` is on lines lost in this extraction; the
// visible arguments select the node's grid position on the CMB (r_cell) or surface
// (r_cell + 1) layer.
255 local_subdomain_id,
256 x_cell + layer_hex_offset_x[wedge][boundary_node_idx],
257 y_cell + layer_hex_offset_y[wedge][boundary_node_idx],
258 r_cell + ( at_cmb ? 0 : 1 ),
259 grid_fine_,
260 radii_ );
261
262 // compute rotation matrix for DoFs on current node
263 auto R_i = trafo_mat_cartesian_to_normal_tangential( normal );
264
265 // insert into wedge-local rotation matrix
// In the node-major layout the boundary nodes start at dof 0 (CMB, nodes 0-2) or dof 9
// (surface, nodes 3-5).
266 int offset_in_R = at_cmb ? 0 : 9;
267 for ( int dimi = 0; dimi < 3; ++dimi )
268 {
269 for ( int dimj = 0; dimj < 3; ++dimj )
270 {
271 R[wedge](
272 offset_in_R + boundary_node_idx * 3 + dimi,
273 offset_in_R + boundary_node_idx * 3 + dimj ) = R_i( dimi, dimj );
274 }
275 }
276 }
277
278 // transform local matrix to rotated/ normal-tangential space: pre/post multiply with rotation matrices
279 // TODO transpose this way?
// A acts on rotated sources below (src is multiplied by R), so post-multiplying by R^T
// expresses A's columns in the normal-tangential basis: A_nt = A_tmp * R^T.
280 A[wedge] = A_tmp[wedge] * R[wedge].transposed();
281 // transform source dofs to nt-space
282 auto src_tmp = R[wedge] * src[wedge];
283 for ( int i = 0; i < 18; ++i )
284 {
285 src[wedge]( i ) = src_tmp( i );
286 }
287
288 // eliminate normal components: Dirichlet on the normal-tangential system
// The transformation orders each node's dofs (normal, tangential, tangential), so the
// normal component is the first dof (idx = node * 3) of each boundary node; zero its
// column for every one of the 6 test functions.
289 int node_start = at_surface ? 3 : 0;
290 int node_end = at_surface ? 6 : 3;
291 for ( int node_idx = node_start; node_idx < node_end; node_idx++ )
292 {
293 int idx = node_idx * 3;
294 for ( int k = 0; k < 6; ++k )
295 {
296 boundary_mask( k, idx ) = 0.0;
297 }
298 }
299 }
300 }
// Neumann (natural) boundary condition: nothing to eliminate, mask stays all ones.
301 else if ( bcf == NEUMANN ) {}
302 }
303
304 // apply boundary mask
// Entry-wise multiply: masked (eliminated) couplings become exact zeros.
305 for ( int wedge = 0; wedge < num_wedges_per_hex_cell; wedge++ )
306 {
307 A[wedge].hadamard_product( boundary_mask );
308 }
309
311
// Local matvec: 6 coarse pressure values per wedge from the 18 (masked, possibly
// rotated) fine velocity dofs.
312 dst[0] = A[0] * src[0];
313 dst[1] = A[1] * src[1];
314
315 // no need to reorder or post trafo the pressure:
316 // independent of dof ordering, div ops map to the same 6 coarse-grid pressure dofs in the same ordering
317
// Atomic scatter-add into the coarse cell (x/2, y/2, r/2) containing this fine cell;
// atomicity is needed because neighboring fine cells update the same coarse dofs.
319 dst_, local_subdomain_id, x_cell / 2, y_cell / 2, r_cell / 2, dst );
320 }
321};
322
324
325} // namespace terra::fe::wedge::operators::shell
Definition communication_plan.hpp:33
Divergence(const grid::shell::DistributedDomain &domain_fine, const grid::shell::DistributedDomain &domain_coarse, const grid::Grid3DDataVec< ScalarT, 3 > &grid_fine, const grid::Grid2DDataScalar< ScalarT > &radii_fine, const grid::Grid4DDataScalar< grid::shell::ShellBoundaryFlag > &boundary_mask_fine, BoundaryConditions bcs, linalg::OperatorApplyMode operator_apply_mode=linalg::OperatorApplyMode::Replace, linalg::OperatorCommunicationMode operator_communication_mode=linalg::OperatorCommunicationMode::CommunicateAdditively)
Definition divergence.hpp:58
void set_operator_apply_and_communication_modes(const linalg::OperatorApplyMode operator_apply_mode, const linalg::OperatorCommunicationMode operator_communication_mode)
Definition divergence.hpp:82
void apply_impl(const SrcVectorType &src, DstVectorType &dst)
Definition divergence.hpp:90
ScalarT ScalarType
Definition divergence.hpp:36
void operator()(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell) const
Definition divergence.hpp:115
Parallel data structure organizing the thick spherical shell metadata for distributed (MPI parallel) ...
Definition spherical_shell.hpp:2518
Q1 scalar finite element vector on a distributed shell grid.
Definition vector_q1.hpp:21
const grid::Grid4DDataScalar< ScalarType > & grid_data() const
Get const reference to grid data.
Definition vector_q1.hpp:139
const grid::Grid4DDataVec< ScalarType, VecDim > & grid_data() const
Get const reference to grid data.
Definition vector_q1.hpp:288
Timer supporting RAII scope or manual stop.
Definition timer.hpp:342
void stop()
Stop the timer and record elapsed time.
Definition timer.hpp:364
Concept for types that behave like linear operators.
Definition operator.hpp:57
void send_recv_with_plan(const ShellBoundaryCommPlan< GridDataType > &plan, const GridDataType &data, SubdomainNeighborhoodSendRecvBuffer< typename GridDataType::value_type, grid::grid_data_vec_dim< GridDataType >() > &recv_buffers, CommunicationReduction reduction=CommunicationReduction::SUM)
Definition communication_plan.hpp:652
Definition boundary_mass.hpp:14
constexpr void quad_felippa_3x2_quad_weights(T(&quad_weights)[quad_felippa_3x2_num_quad_points])
Definition wedge/quadrature/quadrature.hpp:93
constexpr int quad_felippa_3x2_num_quad_points
Definition wedge/quadrature/quadrature.hpp:66
constexpr void quad_felippa_3x2_quad_points(dense::Vec< T, 3 >(&quad_points)[quad_felippa_3x2_num_quad_points])
Definition wedge/quadrature/quadrature.hpp:70
constexpr int num_nodes_per_wedge_surface
Definition kernel_helpers.hpp:6
void atomically_add_local_wedge_scalar_coefficients(const grid::Grid4DDataScalar< T > &global_coefficients, const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const dense::Vec< T, 6 >(&local_coefficients)[2])
Performs an atomic add of the two local wedge coefficient vectors of a hex cell into the global coeff...
Definition kernel_helpers.hpp:407
constexpr int fine_lateral_wedge_idx(const int x_cell_fine, const int y_cell_fine, const int wedge_idx_fine)
Returns the lateral wedge index with respect to a coarse grid wedge from the fine wedge indices.
Definition kernel_helpers.hpp:601
void wedge_surface_physical_coords(dense::Vec< T, 3 >(&wedge_surf_phy_coords)[num_wedges_per_hex_cell][num_nodes_per_wedge_surface], const grid::Grid3DDataVec< T, 3 > &lateral_grid, const int local_subdomain_id, const int x_cell, const int y_cell)
Extracts the (unit sphere) surface vertex coords of the two wedges of a hex cell.
Definition kernel_helpers.hpp:26
constexpr void reorder_local_dofs(const DoFOrdering doo_from, const DoFOrdering doo_to, dense::Vec< ScalarT, 18 > &dofs)
Definition kernel_helpers.hpp:619
constexpr T shape_coarse(const int coarse_node_idx, const int fine_radial_wedge_idx, const int fine_lateral_wedge_idx, const T xi_fine, const T eta_fine, const T zeta_fine)
Definition integrands.hpp:373
constexpr int num_wedges_per_hex_cell
Definition kernel_helpers.hpp:5
void extract_local_wedge_vector_coefficients(dense::Vec< T, 6 >(&local_coefficients)[2], const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int d, const grid::Grid4DDataVec< T, VecDim > &global_coefficients)
Extracts the local vector coefficients for the two wedges of a hex cell from the global coefficient v...
Definition kernel_helpers.hpp:356
constexpr int num_nodes_per_wedge
Definition kernel_helpers.hpp:7
constexpr dense::Vec< T, 3 > grad_shape(const int node_idx, const T xi, const T eta, const T zeta)
Gradient of the full shape function:
Definition integrands.hpp:228
constexpr dense::Mat< T, 3, 3 > jac(const dense::Vec< T, 3 > &p1_phy, const dense::Vec< T, 3 > &p2_phy, const dense::Vec< T, 3 > &p3_phy, const T r_1, const T r_2, const T xi, const T eta, const T zeta)
Definition integrands.hpp:657
dense::Vec< typename CoordsShellType::value_type, 3 > coords(const int subdomain, const int x, const int y, const int r, const CoordsShellType &coords_shell, const CoordsRadiiType &coords_radii)
Definition spherical_shell.hpp:2871
BoundaryConditionMapping[2] BoundaryConditions
Definition shell/bit_masks.hpp:37
ShellBoundaryFlag
FlagLike that indicates boundary types for the thick spherical shell.
Definition shell/bit_masks.hpp:12
Kokkos::MDRangePolicy< Kokkos::Rank< 4 > > local_domain_md_range_policy_cells(const DistributedDomain &distributed_domain)
Definition spherical_shell.hpp:2739
BoundaryConditionFlag get_boundary_condition_flag(const BoundaryConditions bcs, ShellBoundaryFlag sbf)
Retrieve the boundary condition flag that is associated with a location in the shell e....
Definition shell/bit_masks.hpp:42
BoundaryConditionFlag
FlagLike that indicates the type of boundary condition
Definition shell/bit_masks.hpp:25
Kokkos::View< ScalarType ***[VecDim], Layout > Grid3DDataVec
Definition grid_types.hpp:42
Kokkos::View< ScalarType ****, Layout > Grid4DDataScalar
Definition grid_types.hpp:27
Kokkos::View< ScalarType **, Layout > Grid2DDataScalar
Definition grid_types.hpp:21
dense::Mat< ScalarType, 3, 3 > trafo_mat_cartesian_to_normal_tangential(const dense::Vec< ScalarType, 3 > &n_input)
Constructs a robust orthonormal transformation matrix from Cartesian to (normal–tangential–tangential...
Definition local_basis_trafo_normal_tangential.hpp:36
OperatorApplyMode
Modes for applying an operator to a vector.
Definition operator.hpp:30
@ Replace
Overwrite the destination vector.
OperatorCommunicationMode
Modes for communication during operator application.
Definition operator.hpp:40
@ CommunicateAdditively
Communicate and add results.
constexpr bool has_flag(E mask_value, E flag) noexcept
Checks if a bitmask value contains a specific flag.
Definition bit_masking.hpp:43
Definition mat.hpp:10
void fill(const T value)
Definition mat.hpp:201
constexpr Mat< T, Cols, Rows > transposed() const
Definition mat.hpp:187
Mat & hadamard_product(const Mat &mat)
Definition mat.hpp:213