12template <
typename ScalarType >
17 throw std::runtime_error(
"debug_sparse_assembly_vector_vec_q1_scalar: only works in serial case" );
22 throw std::runtime_error(
"debug_sparse_assembly_vector_vec_q1_scalar: vec must be on host space" );
27 Eigen::SparseVector< ScalarType > result( rows );
31 for (
int idx_0 = 0; idx_0 < vec.
grid_data().extent( 0 ); ++idx_0 )
33 for (
int idx_1 = 0; idx_1 < vec.
grid_data().extent( 1 ); ++idx_1 )
35 for (
int idx_2 = 0; idx_2 < vec.
grid_data().extent( 2 ); ++idx_2 )
37 for (
int idx_3 = 0; idx_3 < vec.
grid_data().extent( 3 ); ++idx_3 )
45 const auto value = vec.
grid_data()( idx_0, idx_1, idx_2, idx_3 );
48 result.insert( row ) = value;
71template < OperatorLike Operator >
83 throw std::runtime_error(
"debug_sparse_assembly_operator_vec_q1_scalar: only works in serial case" );
88 throw std::runtime_error(
"debug_sparse_assembly_vec_q1_scalar: tmp_src must be on host space" );
94 Eigen::SparseMatrix< double > mat( rows, cols );
100 for (
int idx_0 = 0; idx_0 < tmp_src.
grid_data().extent( 0 ); ++idx_0 )
102 for (
int idx_1 = 0; idx_1 < tmp_src.
grid_data().extent( 1 ); ++idx_1 )
104 for (
int idx_2 = 0; idx_2 < tmp_src.
grid_data().extent( 2 ); ++idx_2 )
106 for (
int idx_3 = 0; idx_3 < tmp_src.
grid_data().extent( 3 ); ++idx_3 )
115 tmp_src.
grid_data()( idx_0, idx_1, idx_2, idx_3 ) = 1.0;
124 for (
int iidx_0 = 0; iidx_0 < tmp_dst.
grid_data().extent( 0 ); ++iidx_0 )
126 for (
int iidx_1 = 0; iidx_1 < tmp_dst.
grid_data().extent( 1 ); ++iidx_1 )
128 for (
int iidx_2 = 0; iidx_2 < tmp_dst.
grid_data().extent( 2 ); ++iidx_2 )
130 for (
int iidx_3 = 0; iidx_3 < tmp_dst.
grid_data().extent( 3 ); ++iidx_3 )
133 tmp_dst.
mask_data()( iidx_0, iidx_1, iidx_2, iidx_3 ),
139 const auto value = tmp_dst.
grid_data()( iidx_0, iidx_1, iidx_2, iidx_3 );
142 mat.insert( A_row, A_col ) =
143 tmp_dst.
grid_data()( iidx_0, iidx_1, iidx_2, iidx_3 );
158 mat.makeCompressed();
Parallel data structure organizing the thick spherical shell metadata for distributed (MPI parallel) ...
Definition spherical_shell.hpp:2498
Q1 scalar finite element vector on a distributed shell grid.
Definition vector_q1.hpp:21
const grid::Grid4DDataScalar< ScalarType > & grid_data() const
Get const reference to grid data.
Definition vector_q1.hpp:137
const grid::Grid4DDataScalar< grid::NodeOwnershipFlag > & mask_data() const
Get const reference to mask data.
Definition vector_q1.hpp:142
void send_recv(const grid::shell::DistributedDomain &domain, grid::Grid4DDataScalar< ScalarType > &grid, const CommunicationReduction reduction=CommunicationReduction::SUM)
Executes packing, sending, receiving, and unpacking operations for the shell.
Definition communication.hpp:750
@ MAX
Stores the max of all received values during receive.
Definition debug_sparse_assembly.hpp:8
Eigen::SparseMatrix< double > debug_sparse_assembly_operator_vec_q1_scalar(const grid::shell::DistributedDomain &domain_src, Operator &A, VectorQ1Scalar< double > &tmp_src, VectorQ1Scalar< double > &tmp_dst)
Brute force sparse matrix assembly for debugging purposes.
Definition debug_sparse_assembly.hpp:72
Eigen::SparseVector< ScalarType > debug_sparse_assembly_vector_vec_q1_scalar(const VectorQ1Scalar< double > &vec)
Assembles a VectorQ1Scalar into an Eigen::SparseVector. Likely not efficient - only use for debugging.
Definition debug_sparse_assembly.hpp:13
void apply(LinearForm &L, typename LinearForm::DstVectorType &dst)
Apply a linear form and write to a destination vector.
Definition linear_form.hpp:37
void assign(Vector &y, const ScalarOf< Vector > &c0)
Assign a scalar value to a vector. Implements: y_i = c0 for all entries i.
Definition vector.hpp:97
int num_processes()
Definition mpi.hpp:17
constexpr bool has_flag(E mask_value, E flag) noexcept
Checks if a bitmask value contains a specific flag.
Definition bit_masking.hpp:43