Loading...
Searching...
No Matches
gradient.hpp
Go to the documentation of this file.
1
2#pragma once
3
4#include "../../quadrature/quadrature.hpp"
6#include "dense/vec.hpp"
10#include "linalg/operator.hpp"
11#include "linalg/vector.hpp"
12#include "linalg/vector_q1.hpp"
13#include "util/timer.hpp"
14
16
27template < typename ScalarT >
29{
30 public:
33 using ScalarType = ScalarT;
34
35 private:
37 grid::shell::DistributedDomain domain_coarse_;
38
42 BoundaryConditions bcs_;
43
44 linalg::OperatorApplyMode operator_apply_mode_;
45 linalg::OperatorCommunicationMode operator_communication_mode_;
46
49
52
53 public:
55 const grid::shell::DistributedDomain& domain_fine,
56 const grid::shell::DistributedDomain& domain_coarse,
57 const grid::Grid3DDataVec< ScalarT, 3 >& grid_fine,
58 const grid::Grid2DDataScalar< ScalarT >& radii_fine,
60 BoundaryConditions bcs,
62 linalg::OperatorCommunicationMode operator_communication_mode =
64 : domain_fine_( domain_fine )
65 , domain_coarse_( domain_coarse )
66 , grid_fine_( grid_fine )
67 , radii_( radii_fine )
68 , boundary_mask_fine_( boundary_mask_fine )
69 , operator_apply_mode_( operator_apply_mode )
70 , operator_communication_mode_( operator_communication_mode )
71 // TODO: we can reuse the send and recv buffers and pass in from the outside somehow
72 , send_buffers_( domain_fine )
73 , recv_buffers_( domain_fine )
74 {
75 bcs_[0] = bcs[0];
76 bcs_[1] = bcs[1];
77 }
78
80 const linalg::OperatorApplyMode operator_apply_mode,
81 const linalg::OperatorCommunicationMode operator_communication_mode )
82 {
83 operator_apply_mode_ = operator_apply_mode;
84 operator_communication_mode_ = operator_communication_mode;
85 }
86
// Applies the gradient operator to `src`, writing the result into `dst`.
// Steps: optionally zero `dst` (Replace mode), cache raw grid views for the
// device functor (this class's operator()), launch the per-cell Kokkos kernel,
// and — in CommunicateAdditively mode — reduce subdomain-boundary contributions.
// NOTE(review): this listing is a documentation extract; the callee name of the
// communication call below sits on a missing original line (orig 108) —
// presumably one of the pack/unpack helpers from communication.hpp; confirm
// against the full header.
87 void apply_impl( const SrcVectorType& src, DstVectorType& dst )
88 {
89 util::Timer timer_apply( "gradient_apply" );
90
// Replace mode: the kernel scatters with atomic adds, so the destination must
// be cleared first; otherwise results accumulate onto the existing dst.
91 if ( operator_apply_mode_ == linalg::OperatorApplyMode::Replace )
92 {
93 assign( dst, 0 );
94 }
95
// Cache non-owning views in members so operator() can read them on device.
96 src_ = src.grid_data();
97 dst_ = dst.grid_data();
98
99 util::Timer timer_kernel( "gradient_kernel" );
// One functor invocation per (subdomain, x, y, r) hex cell; *this is the functor.
100 Kokkos::parallel_for( "matvec", grid::shell::local_domain_md_range_policy_cells( domain_fine_ ), *this );
// Fence so timer_kernel measures kernel completion, not just the async launch.
101 Kokkos::fence();
102 timer_kernel.stop();
103
// Sum contributions on shared subdomain boundaries (neighbor exchange + reduce).
104 if ( operator_communication_mode_ == linalg::OperatorCommunicationMode::CommunicateAdditively )
105 {
106 util::Timer timer_comm( "gradient_comm" );
107
109 domain_fine_, dst_, send_buffers_, recv_buffers_ );
111 }
112 }
113
// Per-cell Kokkos functor: assembles the 18x6 local gradient element matrices
// (3 velocity components x 6 fine nodes as rows, 6 coarse pressure shape
// functions as columns) for the two wedges of one hex cell, applies
// Dirichlet/free-slip boundary handling, performs the local matvec, and
// atomically adds the results into the global destination grid.
// NOTE(review): this listing is a documentation extract; several original
// source lines are missing (declarations of wedge_phy_surf, A, R, A_tmp,
// normal, src, dst, dst_tmp, dst_d; the quadrature point/weight initialization;
// and the reorder_local_dofs call) — consult the full header for them.
114 KOKKOS_INLINE_FUNCTION void
115 operator()( const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell ) const
116 {
117 // Gather surface points for each wedge.
119 wedge_surface_physical_coords( wedge_phy_surf, grid_fine_, local_subdomain_id, x_cell, y_cell );
120
121 // Gather wedge radii.
// r_1/r_2 bound the radial layer of this cell (inner/outer shell radii).
122 const ScalarT r_1 = radii_( local_subdomain_id, r_cell );
123 const ScalarT r_2 = radii_( local_subdomain_id, r_cell + 1 );
124
125 // Quadrature points.
126 constexpr auto num_quad_points = quadrature::quad_felippa_1x1_num_quad_points;
127
128 dense::Vec< ScalarT, 3 > quad_points[num_quad_points];
129 ScalarT quad_weights[num_quad_points];
130
133
// Radial position of this fine cell within its coarse parent (2:1 refinement;
// see the /2 coarse indexing when extracting coefficients below).
134 const int fine_radial_wedge_index = r_cell % 2;
135
136 // Compute the local element matrix.
138
139 for ( int q = 0; q < num_quad_points; q++ )
140 {
141 for ( int wedge = 0; wedge < num_wedges_per_hex_cell; wedge++ )
142 {
143 const int fine_lateral_wedge_index = fine_lateral_wedge_idx( x_cell, y_cell, wedge );
144
// Jacobian of the reference-to-physical map at this quadrature point.
145 const auto J = jac( wedge_phy_surf[wedge], r_1, r_2, quad_points[q] );
146 const auto det = Kokkos::abs( J.det() );
147 const auto J_inv_transposed = J.inv().transposed();
148
149 for ( int i = 0; i < num_nodes_per_wedge; i++ )
150 {
151 const auto grad_i = grad_shape( i, quad_points[q] );
152
153 for ( int j = 0; j < num_nodes_per_wedge; j++ )
154 {
// Coarse (pressure) shape function evaluated at the fine-cell quad point.
155 const auto shape_j =
156 shape_coarse( j, fine_radial_wedge_index, fine_lateral_wedge_index, quad_points[q] );
157
// Integrand per velocity component d: -w_q * (J^{-T} grad phi_i)_d * psi_j * |det J|.
// Rows are dimension-major: row = d * 6 + i.
158 for ( int d = 0; d < 3; d++ )
159 {
160 A[wedge]( d * 6 + i, j ) +=
161 quad_weights[q] * ( -( ( J_inv_transposed * grad_i )(d) *shape_j ) * det );
162 }
163 }
164 }
165 }
166 }
167
// Gather coarse-grid scalar coefficients; /2 maps fine cell -> coarse parent cell.
169 extract_local_wedge_scalar_coefficients( src, local_subdomain_id, x_cell / 2, y_cell / 2, r_cell / 2, src_ );
170
171 // Boundary treatment
// CMB flag is checked at the cell's inner radial index, SURFACE at the outer one.
172 bool at_cmb = util::has_flag( boundary_mask_fine_( local_subdomain_id, x_cell, y_cell, r_cell ), CMB );
173 bool at_surface =
174 util::has_flag( boundary_mask_fine_( local_subdomain_id, x_cell, y_cell, r_cell + 1 ), SURFACE );
175
// Multiplicative mask over the 18x6 element matrix; 1 keeps, 0 eliminates a row entry.
176 dense::Mat< ScalarT, 18, 6 > boundary_mask;
177 boundary_mask.fill( 1.0 );
178
180 // flag to later not go through the hassle of checking the bcs
181 bool freeslip_reorder = false;
182
183 if ( at_cmb || at_surface )
184 {
185 // Select which shell boundary this cell touches (CMB or outer surface).
186 ShellBoundaryFlag sbf = at_cmb ? CMB : SURFACE;
187 BoundaryConditionFlag bcf = get_boundary_condition_flag( bcs_, sbf );
188
189 if ( bcf == DIRICHLET )
190 {
191 for ( int dimi = 0; dimi < 3; ++dimi )
192 {
193 for ( int i = 0; i < num_nodes_per_wedge; i++ )
194 {
195 for ( int j = 0; j < num_nodes_per_wedge; j++ )
196 {
// && binds tighter than ||: zero bottom-node rows (i < 3) at the CMB,
// top-node rows (i >= 3) at the surface.
197 if ( at_cmb && ( i < 3 ) || at_surface && ( i >= 3 ) )
198 {
199 boundary_mask( dimi * num_nodes_per_wedge + i, j ) = 0.0;
200 }
201 }
202 }
203 }
204 }
205 else if ( bcf == FREESLIP )
206 {
207
208 freeslip_reorder = true;
210
211 // reorder source dofs for nodes instead of velocity dims in src vector and local matrix
// A is dimension-major (row = dim * 6 + node); A_tmp becomes node-major
// (row = node * 3 + dim) so per-node 3x3 rotations can be applied blockwise.
212 for ( int wedge = 0; wedge < 2; ++wedge )
213 {
214 for ( int node_idxi = 0; node_idxi < num_nodes_per_wedge; node_idxi++ )
215 {
216 for ( int dimi = 0; dimi < 3; ++dimi )
217 {
218 for ( int node_idxj = 0; node_idxj < num_nodes_per_wedge; node_idxj++ )
219 {
220 A_tmp[wedge]( node_idxi * 3 + dimi, node_idxj ) =
221 A[wedge]( node_idxi + dimi * num_nodes_per_wedge, node_idxj );
222 }
223 }
224 }
225 }
226
227 // assemble rotation matrices for boundary nodes
228 // e.g. if we are at CMB, we need to rotate DoFs 0, 1, 2 of each wedge
229 // at SURFACE, we need to rotate DoFs 3, 4, 5
230
// Lateral hex offsets of the three surface nodes of each wedge.
231 constexpr int layer_hex_offset_x[2][3] = { { 0, 1, 0 }, { 1, 0, 1 } };
232 constexpr int layer_hex_offset_y[2][3] = { { 0, 0, 1 }, { 1, 1, 0 } };
233
234 for ( int wedge = 0; wedge < 2; ++wedge )
235 {
// Make the 18x18 rotation matrix the identity; only boundary-node
// 3x3 blocks are overwritten below.
236 // make rotation matrix unity
237 for ( int i = 0; i < 18; ++i )
238 {
239 R[wedge]( i, i ) = 1.0;
240 }
241
242 for ( int boundary_node_idx = 0; boundary_node_idx < 3; boundary_node_idx++ )
243 {
244 // compute normal
// NOTE(review): the declaration/assignment of `normal` is on a missing
// original line (orig 245) — presumably built from coords(...) below.
246 local_subdomain_id,
247 x_cell + layer_hex_offset_x[wedge][boundary_node_idx],
248 y_cell + layer_hex_offset_y[wedge][boundary_node_idx],
249 r_cell + ( at_cmb ? 0 : 1 ),
250 grid_fine_,
251 radii_ );
252
253
254 // compute rotation matrix for DoFs on current node
255 auto R_i = trafo_mat_cartesian_to_normal_tangential( normal );
256
257 // insert into wedge-local rotation matrix
// CMB nodes occupy node-major rows 0..8, surface nodes rows 9..17.
258 int offset_in_R = at_cmb ? 0 : 9;
259 for ( int dimi = 0; dimi < 3; ++dimi )
260 {
261 for ( int dimj = 0; dimj < 3; ++dimj )
262 {
263 R[wedge](
264 offset_in_R + boundary_node_idx * 3 + dimi,
265 offset_in_R + boundary_node_idx * 3 + dimj ) = R_i( dimi, dimj );
266 }
267 }
268 }
269
270 // transform local matrix to rotated/ normal-tangential space: pre/post multiply with rotation matrices
271 // TODO transpose this way?
// NOTE(review): only the left rotation R*A is applied — columns are scalar
// (pressure) DoFs and carry no direction, so no right multiplication is needed.
272 A[wedge] = R[wedge] * A_tmp[wedge];
273
274 // eliminate normal components: Dirichlet on the normal-tangential system
// In node-major ordering, row node_idx*3 is the normal component of that node.
275 int node_start = at_surface ? 3 : 0;
276 int node_end = at_surface ? 6 : 3;
277
278 for ( int node_idx = node_start; node_idx < node_end; node_idx++ )
279 {
280 int idx = node_idx * 3;
281 for ( int k = 0; k < 6; ++k )
282 {
283 boundary_mask( idx, k ) = 0.0;
284 }
285 }
286 }
287
288 }
// Neumann is the natural boundary condition here — nothing to modify.
289 else if ( bcf == NEUMANN ) {}
290 }
291
292 // apply boundary mask
// Elementwise (Hadamard) product zeroes the rows selected above.
293 for ( int wedge = 0; wedge < num_wedges_per_hex_cell; wedge++ )
294 {
295 A[wedge].hadamard_product( boundary_mask );
296 }
297
// Local matvec per wedge: dst (18) = A (18x6) * src (6).
299
300 dst[0] = A[0] * src[0];
301 dst[1] = A[1] * src[1];
302
303 if ( freeslip_reorder )
304 {
305
306 // transform dst back from nt space
// R is orthonormal per 3x3 block, so R^T is its inverse.
308 dst_tmp[0] = R[0].transposed() * dst[0];
309 dst_tmp[1] = R[1].transposed() * dst[1];
310 for ( int i = 0; i < 18; ++i )
311 {
312 dst[0]( i ) = dst_tmp[0]( i );
313 dst[1]( i ) = dst_tmp[1]( i );
314 }
315
316 // reorder to dimensionwise ordering
// NOTE(review): the reorder_local_dofs call restoring dimension-major order
// sits on missing original lines (orig 317-318) — the slice below assumes
// dimension-major layout; confirm against the full header.
319
320 }
321
// Scatter: slice the 6 entries of velocity dimension d per wedge and
// atomically add them into the global destination grid.
322 for ( int d = 0; d < 3; d++ )
323 {
325 dst_d[0] = dst[0].template slice< 6 >( d * 6 );
326 dst_d[1] = dst[1].template slice< 6 >( d * 6 );
327
329 dst_, local_subdomain_id, x_cell, y_cell, r_cell, d, dst_d );
330 }
331 }
332};
333
335
336} // namespace terra::fe::wedge::operators::shell
ScalarT ScalarType
Definition gradient.hpp:33
Gradient(const grid::shell::DistributedDomain &domain_fine, const grid::shell::DistributedDomain &domain_coarse, const grid::Grid3DDataVec< ScalarT, 3 > &grid_fine, const grid::Grid2DDataScalar< ScalarT > &radii_fine, const grid::Grid4DDataScalar< grid::shell::ShellBoundaryFlag > &boundary_mask_fine, BoundaryConditions bcs, linalg::OperatorApplyMode operator_apply_mode=linalg::OperatorApplyMode::Replace, linalg::OperatorCommunicationMode operator_communication_mode=linalg::OperatorCommunicationMode::CommunicateAdditively)
Definition gradient.hpp:54
void apply_impl(const SrcVectorType &src, DstVectorType &dst)
Definition gradient.hpp:87
void set_operator_apply_and_communication_modes(const linalg::OperatorApplyMode operator_apply_mode, const linalg::OperatorCommunicationMode operator_communication_mode)
Definition gradient.hpp:79
void operator()(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell) const
Definition gradient.hpp:115
Parallel data structure organizing the thick spherical shell metadata for distributed (MPI parallel) ...
Definition spherical_shell.hpp:2498
Q1 scalar finite element vector on a distributed shell grid.
Definition vector_q1.hpp:21
const grid::Grid4DDataScalar< ScalarType > & grid_data() const
Get const reference to grid data.
Definition vector_q1.hpp:137
const grid::Grid4DDataVec< ScalarType, VecDim > & grid_data() const
Get const reference to grid data.
Definition vector_q1.hpp:280
Timer supporting RAII scope or manual stop.
Definition timer.hpp:270
void stop()
Stop the timer and record elapsed time.
Definition timer.hpp:289
Concept for types that behave like linear operators.
Definition operator.hpp:57
void unpack_and_reduce_local_subdomain_boundaries(const grid::shell::DistributedDomain &domain, const GridDataType &data, SubdomainNeighborhoodSendRecvBuffer< typename GridDataType::value_type, grid::grid_data_vec_dim< GridDataType >() > &boundary_recv_buffers, CommunicationReduction reduction=CommunicationReduction::SUM)
Unpacks and reduces local subdomain boundaries.
Definition communication.hpp:672
void pack_send_and_recv_local_subdomain_boundaries(const grid::shell::DistributedDomain &domain, const GridDataType &data, SubdomainNeighborhoodSendRecvBuffer< typename GridDataType::value_type, grid::grid_data_vec_dim< GridDataType >() > &boundary_send_buffers, SubdomainNeighborhoodSendRecvBuffer< typename GridDataType::value_type, grid::grid_data_vec_dim< GridDataType >() > &boundary_recv_buffers)
Packs, sends, and receives local subdomain boundaries using two sets of buffers.
Definition communication.hpp:242
Definition boundary_mass.hpp:14
constexpr void quad_felippa_1x1_quad_points(dense::Vec< T, 3 >(&quad_points)[quad_felippa_1x1_num_quad_points])
Definition wedge/quadrature/quadrature.hpp:36
constexpr void quad_felippa_1x1_quad_weights(T(&quad_weights)[quad_felippa_1x1_num_quad_points])
Definition wedge/quadrature/quadrature.hpp:43
constexpr int quad_felippa_1x1_num_quad_points
Definition wedge/quadrature/quadrature.hpp:32
constexpr int num_nodes_per_wedge_surface
Definition kernel_helpers.hpp:6
constexpr int fine_lateral_wedge_idx(const int x_cell_fine, const int y_cell_fine, const int wedge_idx_fine)
Returns the lateral wedge index with respect to a coarse grid wedge from the fine wedge indices.
Definition kernel_helpers.hpp:601
void wedge_surface_physical_coords(dense::Vec< T, 3 >(&wedge_surf_phy_coords)[num_wedges_per_hex_cell][num_nodes_per_wedge_surface], const grid::Grid3DDataVec< T, 3 > &lateral_grid, const int local_subdomain_id, const int x_cell, const int y_cell)
Extracts the (unit sphere) surface vertex coords of the two wedges of a hex cell.
Definition kernel_helpers.hpp:26
constexpr void reorder_local_dofs(const DoFOrdering doo_from, const DoFOrdering doo_to, dense::Vec< ScalarT, 18 > &dofs)
Definition kernel_helpers.hpp:619
void atomically_add_local_wedge_vector_coefficients(const grid::Grid4DDataVec< T, VecDim > &global_coefficients, const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int d, const dense::Vec< T, 6 > local_coefficients[2])
Performs an atomic add of the two local wedge coefficient vectors of a hex cell into the global coeff...
Definition kernel_helpers.hpp:465
constexpr T shape_coarse(const int coarse_node_idx, const int fine_radial_wedge_idx, const int fine_lateral_wedge_idx, const T xi_fine, const T eta_fine, const T zeta_fine)
Definition integrands.hpp:373
constexpr int num_wedges_per_hex_cell
Definition kernel_helpers.hpp:5
void extract_local_wedge_scalar_coefficients(dense::Vec< T, 6 >(&local_coefficients)[2], const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const grid::Grid4DDataScalar< T > &global_coefficients)
Extracts the local vector coefficients for the two wedges of a hex cell from the global coefficient v...
Definition kernel_helpers.hpp:306
constexpr int num_nodes_per_wedge
Definition kernel_helpers.hpp:7
constexpr dense::Vec< T, 3 > grad_shape(const int node_idx, const T xi, const T eta, const T zeta)
Gradient of the full shape function:
Definition integrands.hpp:228
constexpr dense::Mat< T, 3, 3 > jac(const dense::Vec< T, 3 > &p1_phy, const dense::Vec< T, 3 > &p2_phy, const dense::Vec< T, 3 > &p3_phy, const T r_1, const T r_2, const T xi, const T eta, const T zeta)
Definition integrands.hpp:643
dense::Vec< typename CoordsShellType::value_type, 3 > coords(const int subdomain, const int x, const int y, const int r, const CoordsShellType &coords_shell, const CoordsRadiiType &coords_radii)
Definition spherical_shell.hpp:2789
BoundaryConditionMapping[2] BoundaryConditions
Definition shell/bit_masks.hpp:37
ShellBoundaryFlag
FlagLike that indicates boundary types for the thick spherical shell.
Definition shell/bit_masks.hpp:12
Kokkos::MDRangePolicy< Kokkos::Rank< 4 > > local_domain_md_range_policy_cells(const DistributedDomain &distributed_domain)
Definition spherical_shell.hpp:2668
BoundaryConditionFlag get_boundary_condition_flag(const BoundaryConditions bcs, ShellBoundaryFlag sbf)
Retrieve the boundary condition flag that is associated with a location in the shell e....
Definition shell/bit_masks.hpp:42
BoundaryConditionFlag
FlagLike that indicates the type of boundary condition.
Definition shell/bit_masks.hpp:25
Kokkos::View< ScalarType ***[VecDim], Layout > Grid3DDataVec
Definition grid_types.hpp:40
Kokkos::View< ScalarType ****[VecDim], Layout > Grid4DDataVec
Definition grid_types.hpp:43
Kokkos::View< ScalarType ****, Layout > Grid4DDataScalar
Definition grid_types.hpp:25
Kokkos::View< ScalarType **, Layout > Grid2DDataScalar
Definition grid_types.hpp:19
dense::Mat< ScalarType, 3, 3 > trafo_mat_cartesian_to_normal_tangential(const dense::Vec< ScalarType, 3 > &n_input)
Constructs a robust orthonormal transformation matrix from Cartesian to (normal–tangential–tangential...
Definition local_basis_trafo_normal_tangential.hpp:36
OperatorApplyMode
Modes for applying an operator to a vector.
Definition operator.hpp:30
@ Replace
Overwrite the destination vector.
OperatorCommunicationMode
Modes for communication during operator application.
Definition operator.hpp:40
@ CommunicateAdditively
Communicate and add results.
constexpr bool has_flag(E mask_value, E flag) noexcept
Checks if a bitmask value contains a specific flag.
Definition bit_masking.hpp:43
Definition mat.hpp:10
void fill(const T value)
Definition mat.hpp:201
constexpr Mat< T, Cols, Rows > transposed() const
Definition mat.hpp:187
Mat & hadamard_product(const Mat &mat)
Definition mat.hpp:213