Loading...
Searching...
No Matches
epsilon_divdiv_kerngen_v09_separate_scatter.hpp
Go to the documentation of this file.
1#pragma once
2
3#include "../../../quadrature/quadrature.hpp"
6#include "dense/vec.hpp"
10#include "impl/Kokkos_Profiling.hpp"
11#include "linalg/operator.hpp"
14#include "linalg/vector.hpp"
15#include "linalg/vector_q1.hpp"
16#include "util/timer.hpp"
17
19
30
31/**
32 * @brief Matrix-free / matrix-based epsilon-div-div operator on wedge elements in a spherical shell.
33 *
34 * This class supports three execution paths:
35 *
36 * 1) Slow path (local matrix path)
37 * - Used if local matrices are stored (full or selective)
38 * - Reuses assembled/stored 18x18 local matrices
39 *
40 * 2) Fast path: Dirichlet/Neumann
41 * - Matrix-free
42 * - Handles Dirichlet by skipping constrained face-node couplings (and diagonal treatment if requested)
43 * - Neumann is naturally included
44 *
45 * 3) Fast path: Free-slip
46 * - Matrix-free
47 * - Applies trial and test-side tangential projection on free-slip boundaries
48 * - Preserves behavior consistent with the slow path for iterative solves
49 *
50 * IMPORTANT DESIGN CHANGE:
51 * ------------------------
52 * The path decision is made on the HOST and cached in `kernel_path_`.
53 * `apply_impl()` dispatches to a different kernel launch per path.
54 * This avoids a runtime branch inside the hot device kernel.
55 */
56template < typename ScalarT, int VecDim = 3 >
58{
59 public:
62 using ScalarType = ScalarT;
63 static constexpr int LocalMatrixDim = 18;
66 using Team = Kokkos::TeamPolicy<>::member_type;
67
68 private:
69 // Optional element-local matrix storage (GCA/coarsening/explicit local-matrix mode)
70 LocalMatrixStorage local_matrix_storage_;
71
72 // Domain and geometry / coefficients
74 grid::Grid3DDataVec< ScalarT, 3 > grid_; ///< Lateral shell geometry (unit sphere coords)
75 grid::Grid2DDataScalar< ScalarT > radii_; ///< Radial coordinates per local subdomain
76 grid::Grid4DDataScalar< ScalarType > k_; ///< Scalar coefficient field
77 grid::Grid4DDataScalar< grid::shell::ShellBoundaryFlag > mask_; ///< Boundary flags per cell/node
78 BoundaryConditions bcs_; ///< CMB and SURFACE boundary conditions
79
80 bool diagonal_; ///< If true, apply diagonal-only action
81
82 linalg::OperatorApplyMode operator_apply_mode_;
83 linalg::OperatorCommunicationMode operator_communication_mode_;
84 linalg::OperatorStoredMatrixMode operator_stored_matrix_mode_;
85
88
89 // Device views captured by kernels
92
93 // Quadrature (Felippa 1x1 on wedge)
94 const int num_quad_points = quadrature::quad_felippa_1x1_num_quad_points;
97
98 // Local subdomain extents (in cells)
99 int local_subdomains_;
100 int hex_lat_;
101 int hex_rad_;
102 int lat_refinement_level_;
103
104 // Team-policy tiling (one team handles a slab lat_tile x lat_tile x r_tile cells)
105 int lat_tile_;
106 int r_tile_;
107 int r_passes_;
108 int r_tile_block_;
109 int lat_tiles_;
110 int r_tiles_;
111 int team_size_;
112 int blocks_;
113
114 ScalarT r_max_;
115 ScalarT r_min_;
116
117 /**
118 * @brief Kernel path selected on host.
119 *
120 * This value is computed whenever BCs or stored-matrix mode change and is then
121 * used by `apply_impl()` to select which kernel launch to issue.
122 */
123 enum class KernelPath
124 {
125 Slow,
126 FastDirichletNeumann,
127 FastFreeslip,
128 };
129 KernelPath kernel_path_ = KernelPath::FastDirichletNeumann;
130
131 private:
132 /**
133 * @brief Recompute the kernel path on host.
134 *
135 * Rules:
136 * - Any stored local matrix mode => slow path
137 * - Else if any free-slip BC on CMB or SURFACE => fast free-slip path
138 * - Else => fast Dirichlet/Neumann path
139 */
140 void update_kernel_path_flag_host_only()
141 {
142 const BoundaryConditionFlag cmb_bc = get_boundary_condition_flag( bcs_, CMB );
143 const BoundaryConditionFlag surface_bc = get_boundary_condition_flag( bcs_, SURFACE );
144
145 const bool has_freeslip = ( cmb_bc == FREESLIP ) || ( surface_bc == FREESLIP );
146 const bool has_stored_matrices = ( operator_stored_matrix_mode_ != linalg::OperatorStoredMatrixMode::Off );
147
148 if ( has_stored_matrices )
149 kernel_path_ = KernelPath::Slow;
150 else if ( has_freeslip )
151 kernel_path_ = KernelPath::FastFreeslip;
152 else
153 kernel_path_ = KernelPath::FastDirichletNeumann;
154 }
155
156 public:
158 const grid::shell::DistributedDomain& domain,
163 BoundaryConditions bcs,
164 bool diagonal,
166 linalg::OperatorCommunicationMode operator_communication_mode =
169 : domain_( domain )
170 , grid_( grid )
171 , radii_( radii )
172 , k_( k )
173 , mask_( mask )
174 , diagonal_( diagonal )
175 , operator_apply_mode_( operator_apply_mode )
176 , operator_communication_mode_( operator_communication_mode )
177 , operator_stored_matrix_mode_( operator_stored_matrix_mode )
178 , recv_buffers_( domain )
179 , comm_plan_( domain )
180 {
181 bcs_[0] = bcs[0];
182 bcs_[1] = bcs[1];
183
186
187 const grid::shell::DomainInfo& domain_info = domain_.domain_info();
188 local_subdomains_ = domain_.subdomains().size();
189 hex_lat_ = domain_info.subdomain_num_nodes_per_side_laterally() - 1;
190 hex_rad_ = domain_info.subdomain_num_nodes_radially() - 1;
191 lat_refinement_level_ = domain_info.diamond_lateral_refinement_level();
192
193 lat_tile_ = 4;
194 r_tile_ = 8;
195 r_passes_ = 1;
196 r_tile_block_ = r_tile_ * r_passes_;
197
198 lat_tiles_ = ( hex_lat_ + lat_tile_ - 1 ) / lat_tile_;
199 r_tiles_ = ( hex_rad_ + r_tile_block_ - 1 ) / r_tile_block_;
200
201 team_size_ = lat_tile_ * lat_tile_ * r_tile_;
202 blocks_ = local_subdomains_ * lat_tiles_ * lat_tiles_ * r_tiles_;
203
204 r_min_ = domain_info.radii()[0];
205 r_max_ = domain_info.radii()[domain_info.radii().size() - 1];
206
207 // Host-side path selection (no in-kernel path branching)
208 update_kernel_path_flag_host_only();
209
210 util::logroot << "[EpsilonDivDiv] tile size (x,y,r)=(" << lat_tile_ << "," << lat_tile_ << "," << r_tile_
211 << "), r_passes=" << r_passes_ << std::endl;
212 util::logroot << "[EpsilonDivDiv] number of tiles (x,y,r)=(" << lat_tiles_ << "," << lat_tiles_ << ","
213 << r_tiles_ << "), team_size=" << team_size_ << ", blocks=" << blocks_ << std::endl;
214 const char* path_name = ( kernel_path_ == KernelPath::Slow ) ? "slow" :
215 ( kernel_path_ == KernelPath::FastFreeslip ) ? "fast-freeslip" :
216 "fast-dirichlet-neumann";
217 util::logroot << "[EpsilonDivDiv] kernel path = " << path_name << std::endl;
218 }
219
221 const linalg::OperatorApplyMode operator_apply_mode,
222 const linalg::OperatorCommunicationMode operator_communication_mode )
223 {
224 operator_apply_mode_ = operator_apply_mode;
225 operator_communication_mode_ = operator_communication_mode;
226 }
227
228 void set_diagonal( bool v ) { diagonal_ = v; }
229
230 void set_boundary_conditions( BoundaryConditions bcs )
231 {
232 bcs_[0] = bcs[0];
233 bcs_[1] = bcs[1];
234
235 // Recompute host-side dispatch mode whenever BCs change
236 update_kernel_path_flag_host_only();
237 }
238
240 const grid::shell::DistributedDomain& get_domain() const { return domain_; }
243
244 KOKKOS_INLINE_FUNCTION
246 const int local_subdomain_id,
247 const int x_cell,
248 const int y_cell,
249 const int r_cell,
251 {
252 return util::has_flag( mask_( local_subdomain_id, x_cell, y_cell, r_cell ), flag );
253 }
254
256 linalg::OperatorStoredMatrixMode operator_stored_matrix_mode,
257 int level_range,
259 {
260 operator_stored_matrix_mode_ = operator_stored_matrix_mode;
261
262 if ( operator_stored_matrix_mode_ != linalg::OperatorStoredMatrixMode::Off )
263 {
265 domain_, operator_stored_matrix_mode_, level_range, GCAElements );
266 }
267
268 // Recompute host-side dispatch mode whenever storage mode changes
269 update_kernel_path_flag_host_only();
270
271 util::logroot << "[EpsilonDivDiv] (set_stored_matrix_mode) kernel path = "
272 << (( kernel_path_ == KernelPath::Slow ) ? "slow" :
273 ( kernel_path_ == KernelPath::FastFreeslip ) ? "fast-freeslip" :
274 "fast-dirichlet-neumann")
275 << std::endl;
276 }
277
278 linalg::OperatorStoredMatrixMode get_stored_matrix_mode() { return operator_stored_matrix_mode_; }
279
280 KOKKOS_INLINE_FUNCTION
282 const int local_subdomain_id,
283 const int x_cell,
284 const int y_cell,
285 const int r_cell,
286 const int wedge,
288 {
289 KOKKOS_ASSERT( operator_stored_matrix_mode_ != linalg::OperatorStoredMatrixMode::Off );
290 local_matrix_storage_.set_matrix( local_subdomain_id, x_cell, y_cell, r_cell, wedge, mat );
291 }
292
293 KOKKOS_INLINE_FUNCTION
295 const int local_subdomain_id,
296 const int x_cell,
297 const int y_cell,
298 const int r_cell,
299 const int wedge ) const
300 {
301 if ( operator_stored_matrix_mode_ != linalg::OperatorStoredMatrixMode::Off )
302 {
303 if ( !local_matrix_storage_.has_matrix( local_subdomain_id, x_cell, y_cell, r_cell, wedge ) )
304 {
305 Kokkos::abort( "No matrix found at that spatial index." );
306 }
307 return local_matrix_storage_.get_matrix( local_subdomain_id, x_cell, y_cell, r_cell, wedge );
308 }
309 return assemble_local_matrix( local_subdomain_id, x_cell, y_cell, r_cell, wedge );
310 }
311
312 /**
313 * @brief Apply the operator: dst <- Op(src) (or additive, depending on mode).
314 *
315 * Host-side responsibilities:
316 * - Handle replace/add mode initialization
317 * - Cache src/dst views for kernel capture
318 * - Build team policy
319 * - Dispatch to exactly one kernel variant based on `kernel_path_`
320 * - Optional halo communication accumulation
321 *
322 * This is where the path decision now lives (host), so device code does not branch
323 * on `kernel_path_` in the hot path.
324 */
325 void apply_impl( const SrcVectorType& src, DstVectorType& dst )
326 {
327 util::Timer timer_apply( "epsilon_divdiv_apply" );
328
329 if ( operator_apply_mode_ == linalg::OperatorApplyMode::Replace )
330 {
331 assign( dst, 0 );
332 }
333
334 dst_ = dst.grid_data();
335 src_ = src.grid_data();
336
337 util::Timer timer_kernel( "epsilon_divdiv_kernel" );
338 Kokkos::TeamPolicy<> policy( blocks_, team_size_ );
339
340 // Fast paths use shared-memory slab staging
341 if ( kernel_path_ != KernelPath::Slow )
342 {
343 policy.set_scratch_size( 0, Kokkos::PerTeam( team_shmem_size( team_size_ ) ) );
344 }
345
346 // Host-side dispatch => no per-team path branching in device code
347 if ( kernel_path_ == KernelPath::Slow )
348 {
349 Kokkos::parallel_for(
350 "epsilon_divdiv_apply_kernel_slow", policy, KOKKOS_CLASS_LAMBDA( const Team& team ) {
351 this->run_team_slow( team );
352 } );
353 }
354 else if ( kernel_path_ == KernelPath::FastFreeslip )
355 {
356 if ( diagonal_ )
357 {
358 Kokkos::parallel_for(
359 "epsilon_divdiv_apply_kernel_fast_fs_diag",
360 policy,
361 KOKKOS_CLASS_LAMBDA( const Team& team ) {
362 this->template run_team_fast_freeslip< true >( team );
363 } );
364 }
365 else
366 {
367 Kokkos::parallel_for(
368 "epsilon_divdiv_apply_kernel_fast_fs_matvec",
369 policy,
370 KOKKOS_CLASS_LAMBDA( const Team& team ) {
371 this->template run_team_fast_freeslip< false >( team );
372 } );
373 }
374 }
375 else
376 {
377 Kokkos::TeamPolicy< Kokkos::LaunchBounds< 128, 5 > > dn_policy( blocks_, team_size_ );
378 dn_policy.set_scratch_size( 0, Kokkos::PerTeam( team_shmem_size_dn( team_size_ ) ) );
379 if ( diagonal_ )
380 {
381 Kokkos::parallel_for(
382 "epsilon_divdiv_apply_kernel_fast_dn_diag",
383 dn_policy,
384 KOKKOS_CLASS_LAMBDA( const Team& team ) {
385 this->template run_team_fast_dirichlet_neumann< true >( team );
386 } );
387 }
388 else
389 {
390 Kokkos::parallel_for(
391 "epsilon_divdiv_apply_kernel_fast_dn_matvec",
392 dn_policy,
393 KOKKOS_CLASS_LAMBDA( const Team& team ) {
394 this->template run_team_fast_dirichlet_neumann< false >( team );
395 } );
396 }
397 }
398
399 Kokkos::fence();
400 timer_kernel.stop();
401
402 if ( operator_communication_mode_ == linalg::OperatorCommunicationMode::CommunicateAdditively )
403 {
404 util::Timer timer_comm( "epsilon_divdiv_comm" );
405 terra::communication::shell::send_recv_with_plan( comm_plan_, dst_, recv_buffers_ );
406 }
407 }
408
409 /**
410 * @brief Convert one gradient column into symmetric-gradient and div contributions.
411 *
412 * Given grad(phi e_dim), this helper computes:
413 * - diagonal entries (E00,E11,E22)
414 * - off-diagonal symmetric entries (sym01,sym02,sym12)
415 * - divergence contribution `gdd`
416 *
417 * The fast kernels use this repeatedly in fused operator application.
418 */
419 KOKKOS_INLINE_FUNCTION
421 const int dim,
422 const double g0,
423 const double g1,
424 const double g2,
425 double& E00,
426 double& E11,
427 double& E22,
428 double& sym01,
429 double& sym02,
430 double& sym12,
431 double& gdd ) const
432 {
433 E00 = E11 = E22 = sym01 = sym02 = sym12 = gdd = 0.0;
434
435 switch ( dim )
436 {
437 case 0:
438 E00 = g0;
439 gdd = g0;
440 sym01 = 0.5 * g1;
441 sym02 = 0.5 * g2;
442 break;
443 case 1:
444 E11 = g1;
445 gdd = g1;
446 sym01 = 0.5 * g0;
447 sym12 = 0.5 * g2;
448 break;
449 default:
450 E22 = g2;
451 gdd = g2;
452 sym02 = 0.5 * g0;
453 sym12 = 0.5 * g1;
454 break;
455 }
456 }
457
458 /**
459 * @brief Team scratch memory size for fast paths.
460 *
461 * Layout per team:
462 * [coords_sh | src_sh | k_sh | r_sh]
463 */
464 KOKKOS_INLINE_FUNCTION
465 size_t team_shmem_size( const int ts ) const
466 {
467 const int nlev = r_tile_ + 1;
468 const int n = lat_tile_ + 1;
469 const int nxy = n * n;
470
471 // coords_sh(nxy,3) + normals_sh(nxy,3) + src_sh(nxy,3,nlev) + k_sh(nxy,nlev) + r_sh(nlev) + 1
472 const size_t nscalars =
473 size_t( nxy ) * 3 + size_t( nxy ) * 3 + size_t( nxy ) * 3 * nlev + size_t( nxy ) * nlev + size_t( nlev ) + 1;
474
475 return sizeof( ScalarType ) * nscalars;
476 }
477
478 KOKKOS_INLINE_FUNCTION
479 size_t team_shmem_size_dn( const int /* ts */ ) const
480 {
481 const int nlev = r_tile_block_ + 1;
482 const int n = lat_tile_ + 1;
483 const int nxy = n * n;
484
485 // coords_sh(nxy,3) + src_sh(nxy,3,nlev) + k_sh(nxy,nlev) + r_sh(nlev)
486 const size_t nscalars =
487 size_t( nxy ) * 3 + size_t( nxy ) * 3 * nlev + size_t( nxy ) * nlev + size_t( nlev );
488
489 return sizeof( ScalarType ) * nscalars;
490 }
491
492 private:
    /**
     * @brief Decode a team league rank / team rank into:
     *        - the owning local subdomain id
     *        - tile origin (x0,y0,r0) in cells
     *        - intra-tile thread coordinates (tx,ty,tr)
     *        - target cell (x_cell,y_cell,r_cell)
     *
     * League layout is subdomain-major:
     *   league = ((local_subdomain_id * lat_tiles_ + lat_x_id) * lat_tiles_
     *             + lat_y_id) * r_tiles_ + r_tile_id
     * Team rank is r-fastest:
     *   tid = (ty * lat_tile_ + tx) * r_tile_ + tr
     */
    KOKKOS_INLINE_FUNCTION
    void decode_team_indices(
        const Team& team,
        int& local_subdomain_id,
        int& x0,
        int& y0,
        int& r0,
        int& tx,
        int& ty,
        int& tr,
        int& x_cell,
        int& y_cell,
        int& r_cell ) const
    {
        // Peel mixed-radix digits off the league rank, fastest-varying first.
        int tmp = team.league_rank();

        const int r_tile_id = tmp % r_tiles_;
        tmp /= r_tiles_;

        const int lat_y_id = tmp % lat_tiles_;
        tmp /= lat_tiles_;

        const int lat_x_id = tmp % lat_tiles_;
        tmp /= lat_tiles_;

        // Whatever remains is the subdomain index (slowest-varying digit).
        local_subdomain_id = tmp;

        // Tile origins in cell coordinates. Note the radial origin advances by
        // r_tile_block_ (= r_tile_ * r_passes_), not by r_tile_.
        x0 = lat_x_id * lat_tile_;
        y0 = lat_y_id * lat_tile_;
        r0 = r_tile_id * r_tile_block_;

        // Intra-team thread coordinates, r-fastest ordering.
        const int tid = team.team_rank();
        tr = tid % r_tile_;
        tx = ( tid / r_tile_ ) % lat_tile_;
        ty = tid / ( r_tile_ * lat_tile_ );

        x_cell = x0 + tx;
        y_cell = y0 + ty;
        r_cell = r0 + tr;
    }
539
540 // -------------------------------------------------------------------------
541 // Path-specific team entry points (NO path branching)
542 // These are the functions called directly by host-dispatched kernel launches.
543 // -------------------------------------------------------------------------
544
545 /**
546 * @brief Team entry for slow (local-matrix) path.
547 *
548 * This wrapper performs only:
549 * - team index decoding
550 * - boundary flag queries
551 * - forwarding to operator_slow_path()
552 *
553 * No dispatch branching happens here.
554 */
555 KOKKOS_INLINE_FUNCTION
556 void run_team_slow( const Team& team ) const
557 {
558 int local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell, r_cell;
559 decode_team_indices( team, local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell, r_cell );
560
561 if ( tr >= r_tile_ )
562 return;
563
564 const bool at_cmb = has_flag( local_subdomain_id, x_cell, y_cell, r_cell, CMB );
565 const bool at_surface = has_flag( local_subdomain_id, x_cell, y_cell, r_cell + 1, SURFACE );
566
567 operator_slow_path(
568 team, local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell, r_cell, at_cmb, at_surface );
569 }
570
571 /**
572 * @brief Team entry for fast Dirichlet/Neumann matrix-free path.
573 *
574 * Templated on Diagonal so the compiler can dead-code-eliminate the
575 * unused matvec or diagonal-only path, reducing register pressure.
576 */
577 template < bool Diagonal >
578 KOKKOS_INLINE_FUNCTION
579 void run_team_fast_dirichlet_neumann( const Team& team ) const
580 {
581 int local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell, r_cell;
582 decode_team_indices( team, local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell, r_cell );
583
584 if ( tr >= r_tile_ )
585 return;
586
587 operator_fast_dirichlet_neumann_path< Diagonal >(
588 team, local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell );
589 }
590
591 /**
592 * @brief Team entry for fast free-slip matrix-free path.
593 *
594 * Templated on Diagonal so the compiler can dead-code-eliminate the
595 * unused matvec or diagonal-only path, reducing register pressure.
596 */
597 template < bool Diagonal >
598 KOKKOS_INLINE_FUNCTION
599 void run_team_fast_freeslip( const Team& team ) const
600 {
601 int local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell, r_cell;
602 decode_team_indices( team, local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell, r_cell );
603
604 if ( tr >= r_tile_ )
605 return;
606
607 const bool at_cmb = has_flag( local_subdomain_id, x_cell, y_cell, r_cell, CMB );
608 const bool at_surface = has_flag( local_subdomain_id, x_cell, y_cell, r_cell + 1, SURFACE );
609
610 operator_fast_freeslip_path< Diagonal >(
611 team, local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell, r_cell, at_cmb, at_surface );
612 }
613
614 // ===================== SLOW PATH =====================
615 KOKKOS_INLINE_FUNCTION
616 void operator_slow_path(
617 const Team& team,
618 const int local_subdomain_id,
619 const int x0,
620 const int y0,
621 const int r0,
622 const int tx,
623 const int ty,
624 const int tr,
625 const int x_cell,
626 const int y_cell,
627 const int r_cell,
628 const bool at_cmb,
629 const bool at_surface ) const
630 {
631 (void) team;
632 (void) x0;
633 (void) y0;
634 (void) r0;
635 (void) tx;
636 (void) ty;
637 (void) tr;
638
639 if ( x_cell >= hex_lat_ || y_cell >= hex_lat_ || r_cell >= hex_rad_ )
640 return;
641
643
644 if ( operator_stored_matrix_mode_ == linalg::OperatorStoredMatrixMode::Full )
645 {
646 A[0] = local_matrix_storage_.get_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 0 );
647 A[1] = local_matrix_storage_.get_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 1 );
648 }
649 else if ( operator_stored_matrix_mode_ == linalg::OperatorStoredMatrixMode::Selective )
650 {
651 if ( local_matrix_storage_.has_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 0 ) &&
652 local_matrix_storage_.has_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 1 ) )
653 {
654 A[0] = local_matrix_storage_.get_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 0 );
655 A[1] = local_matrix_storage_.get_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 1 );
656 }
657 else
658 {
659 A[0] = assemble_local_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 0 );
660 A[1] = assemble_local_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 1 );
661 }
662 }
663 else
664 {
665 A[0] = assemble_local_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 0 );
666 A[1] = assemble_local_matrix( local_subdomain_id, x_cell, y_cell, r_cell, 1 );
667 }
668
669 dense::Vec< ScalarT, 18 > src[num_wedges_per_hex_cell];
670 for ( int dimj = 0; dimj < 3; dimj++ )
671 {
672 dense::Vec< ScalarT, 6 > src_d[num_wedges_per_hex_cell];
673 extract_local_wedge_vector_coefficients( src_d, local_subdomain_id, x_cell, y_cell, r_cell, dimj, src_ );
674
675 for ( int wedge = 0; wedge < num_wedges_per_hex_cell; wedge++ )
676 {
677 for ( int i = 0; i < num_nodes_per_wedge; i++ )
678 {
679 src[wedge]( dimj * num_nodes_per_wedge + i ) = src_d[wedge]( i );
680 }
681 }
682 }
683
684 dense::Mat< ScalarT, LocalMatrixDim, LocalMatrixDim > boundary_mask;
685 boundary_mask.fill( 1.0 );
686
687 bool freeslip_reorder = false;
688 dense::Mat< ScalarT, LocalMatrixDim, LocalMatrixDim > R[num_wedges_per_hex_cell];
689
690 if ( at_cmb || at_surface )
691 {
692 ShellBoundaryFlag sbf = at_cmb ? CMB : SURFACE;
694
695 if ( bcf == DIRICHLET )
696 {
697 for ( int dimi = 0; dimi < 3; ++dimi )
698 {
699 for ( int dimj = 0; dimj < 3; ++dimj )
700 {
701 for ( int i = 0; i < num_nodes_per_wedge; i++ )
702 {
703 for ( int j = 0; j < num_nodes_per_wedge; j++ )
704 {
705 if ( ( at_cmb && ( ( dimi == dimj && i != j && ( i < 3 || j < 3 ) ) ||
706 ( dimi != dimj && ( i < 3 || j < 3 ) ) ) ) ||
707 ( at_surface && ( ( dimi == dimj && i != j && ( i >= 3 || j >= 3 ) ) ||
708 ( dimi != dimj && ( i >= 3 || j >= 3 ) ) ) ) )
709 {
710 boundary_mask( i + dimi * num_nodes_per_wedge, j + dimj * num_nodes_per_wedge ) =
711 0.0;
712 }
713 }
714 }
715 }
716 }
717 }
718 else if ( bcf == FREESLIP )
719 {
720 freeslip_reorder = true;
721 dense::Mat< ScalarT, LocalMatrixDim, LocalMatrixDim > A_tmp[num_wedges_per_hex_cell] = { 0 };
722
723 for ( int wedge = 0; wedge < 2; ++wedge )
724 {
725 for ( int dimi = 0; dimi < 3; ++dimi )
726 {
727 for ( int node_idxi = 0; node_idxi < num_nodes_per_wedge; node_idxi++ )
728 {
729 for ( int dimj = 0; dimj < 3; ++dimj )
730 {
731 for ( int node_idxj = 0; node_idxj < num_nodes_per_wedge; node_idxj++ )
732 {
733 A_tmp[wedge]( node_idxi * 3 + dimi, node_idxj * 3 + dimj ) = A[wedge](
734 node_idxi + dimi * num_nodes_per_wedge,
735 node_idxj + dimj * num_nodes_per_wedge );
736 }
737 }
738 }
739 }
741 }
742
743 constexpr int layer_hex_offset_x[2][3] = { { 0, 1, 0 }, { 1, 0, 1 } };
744 constexpr int layer_hex_offset_y[2][3] = { { 0, 0, 1 }, { 1, 1, 0 } };
745
746 for ( int wedge = 0; wedge < 2; ++wedge )
747 {
748 for ( int i = 0; i < LocalMatrixDim; ++i )
749 {
750 R[wedge]( i, i ) = 1.0;
751 }
752
753 for ( int boundary_node_idx = 0; boundary_node_idx < 3; boundary_node_idx++ )
754 {
755 dense::Vec< double, 3 > normal = grid::shell::coords(
756 local_subdomain_id,
757 x_cell + layer_hex_offset_x[wedge][boundary_node_idx],
758 y_cell + layer_hex_offset_y[wedge][boundary_node_idx],
759 r_cell + ( at_cmb ? 0 : 1 ),
760 grid_,
761 radii_ );
762
763 auto R_i = trafo_mat_cartesian_to_normal_tangential( normal );
764
765 const int offset_in_R = at_cmb ? 0 : 9;
766 for ( int dimi = 0; dimi < 3; ++dimi )
767 {
768 for ( int dimj = 0; dimj < 3; ++dimj )
769 {
770 R[wedge](
771 offset_in_R + boundary_node_idx * 3 + dimi,
772 offset_in_R + boundary_node_idx * 3 + dimj ) = R_i( dimi, dimj );
773 }
774 }
775 }
776
777 A[wedge] = R[wedge] * A_tmp[wedge] * R[wedge].transposed();
778
779 auto src_tmp = R[wedge] * src[wedge];
780 for ( int i = 0; i < 18; ++i )
781 {
782 src[wedge]( i ) = src_tmp( i );
783 }
784
785 const int node_start = at_surface ? 3 : 0;
786 const int node_end = at_surface ? 6 : 3;
787
788 for ( int node_idx = node_start; node_idx < node_end; node_idx++ )
789 {
790 const int idx = node_idx * 3;
791 for ( int k = 0; k < 18; ++k )
792 {
793 if ( k != idx )
794 {
795 boundary_mask( idx, k ) = 0.0;
796 boundary_mask( k, idx ) = 0.0;
797 }
798 }
799 }
800 }
801 }
802 }
803
804 for ( int wedge = 0; wedge < num_wedges_per_hex_cell; wedge++ )
805 {
806 A[wedge].hadamard_product( boundary_mask );
807 }
808
809 if ( diagonal_ )
810 {
811 A[0] = A[0].diagonal();
812 A[1] = A[1].diagonal();
813 }
814
815 dense::Vec< ScalarT, LocalMatrixDim > dst[num_wedges_per_hex_cell];
816 dst[0] = A[0] * src[0];
817 dst[1] = A[1] * src[1];
818
819 if ( freeslip_reorder )
820 {
821 dense::Vec< ScalarT, LocalMatrixDim > dst_tmp[num_wedges_per_hex_cell];
822 dst_tmp[0] = R[0].transposed() * dst[0];
823 dst_tmp[1] = R[1].transposed() * dst[1];
824
825 for ( int i = 0; i < 18; ++i )
826 {
827 dst[0]( i ) = dst_tmp[0]( i );
828 dst[1]( i ) = dst_tmp[1]( i );
829 }
830
833 }
834
835 for ( int dimi = 0; dimi < 3; dimi++ )
836 {
837 dense::Vec< ScalarT, 6 > dst_d[num_wedges_per_hex_cell];
838 dst_d[0] = dst[0].template slice< 6 >( dimi * num_nodes_per_wedge );
839 dst_d[1] = dst[1].template slice< 6 >( dimi * num_nodes_per_wedge );
840
842 dst_, local_subdomain_id, x_cell, y_cell, r_cell, dimi, dst_d );
843 }
844 }
845
846 // ===================== FAST DIRICHLET/NEUMANN PATH =====================
847 template < bool Diagonal >
848 KOKKOS_INLINE_FUNCTION
849 void operator_fast_dirichlet_neumann_path(
850 const Team& team,
851 const int local_subdomain_id,
852 const int x0,
853 const int y0,
854 const int r0,
855 const int tx,
856 const int ty,
857 const int tr,
858 const int x_cell,
859 const int y_cell ) const
860 {
861 const int nlev = r_tile_block_ + 1;
862 const int nxy = ( lat_tile_ + 1 ) * ( lat_tile_ + 1 );
863
864 double* shmem =
865 reinterpret_cast< double* >( team.team_shmem().get_shmem( team_shmem_size_dn( team.team_size() ) ) );
866
867 using ScratchCoords =
868 Kokkos::View< double**, Kokkos::LayoutRight, typename Team::scratch_memory_space, Kokkos::MemoryUnmanaged >;
869 using ScratchSrc = Kokkos::
870 View< double***, Kokkos::LayoutRight, typename Team::scratch_memory_space, Kokkos::MemoryUnmanaged >;
871 using ScratchK =
872 Kokkos::View< double**, Kokkos::LayoutRight, typename Team::scratch_memory_space, Kokkos::MemoryUnmanaged >;
873
874 ScratchCoords coords_sh( shmem, nxy, 3 );
875 shmem += nxy * 3;
876
877 ScratchSrc src_sh( shmem, nxy, 3, nlev );
878 shmem += nxy * 3 * nlev;
879
880 ScratchK k_sh( shmem, nxy, nlev );
881 shmem += nxy * nlev;
882
883 auto r_sh =
884 Kokkos::View< double*, Kokkos::LayoutRight, typename Team::scratch_memory_space, Kokkos::MemoryUnmanaged >(
885 shmem, nlev );
886
887 auto node_id = [&]( int nx, int ny ) -> int { return nx + ( lat_tile_ + 1 ) * ny; };
888
889 Kokkos::parallel_for( Kokkos::TeamThreadRange( team, nxy ), [&]( int n ) {
890 const int dxn = n % ( lat_tile_ + 1 );
891 const int dyn = n / ( lat_tile_ + 1 );
892 const int xi = x0 + dxn;
893 const int yi = y0 + dyn;
894
895 if ( xi <= hex_lat_ && yi <= hex_lat_ )
896 {
897 coords_sh( n, 0 ) = grid_( local_subdomain_id, xi, yi, 0 );
898 coords_sh( n, 1 ) = grid_( local_subdomain_id, xi, yi, 1 );
899 coords_sh( n, 2 ) = grid_( local_subdomain_id, xi, yi, 2 );
900 }
901 else
902 {
903 coords_sh( n, 0 ) = coords_sh( n, 1 ) = coords_sh( n, 2 ) = 0.0;
904 }
905 } );
906
907 Kokkos::parallel_for( Kokkos::TeamThreadRange( team, nlev ), [&]( int lvl ) {
908 const int rr = r0 + lvl;
909 r_sh( lvl ) = ( rr <= hex_rad_ ) ? radii_( local_subdomain_id, rr ) : 0.0;
910 } );
911
912 const int total_pairs = nxy * nlev;
913 Kokkos::parallel_for( Kokkos::TeamThreadRange( team, total_pairs ), [&]( int t ) {
914 const int node = t / nlev;
915 const int lvl = t - node * nlev;
916
917 const int dxn = node % ( lat_tile_ + 1 );
918 const int dyn = node / ( lat_tile_ + 1 );
919
920 const int xi = x0 + dxn;
921 const int yi = y0 + dyn;
922 const int rr = r0 + lvl;
923
924 if ( xi <= hex_lat_ && yi <= hex_lat_ && rr <= hex_rad_ )
925 {
926 k_sh( node, lvl ) = k_( local_subdomain_id, xi, yi, rr );
927 src_sh( node, 0, lvl ) = src_( local_subdomain_id, xi, yi, rr, 0 );
928 src_sh( node, 1, lvl ) = src_( local_subdomain_id, xi, yi, rr, 1 );
929 src_sh( node, 2, lvl ) = src_( local_subdomain_id, xi, yi, rr, 2 );
930 }
931 else
932 {
933 k_sh( node, lvl ) = 0.0;
934 src_sh( node, 0, lvl ) = src_sh( node, 1, lvl ) = src_sh( node, 2, lvl ) = 0.0;
935 }
936 } );
937
938 team.team_barrier();
939
940 if ( x_cell >= hex_lat_ || y_cell >= hex_lat_ )
941 return;
942
943 constexpr double ONE_THIRD = 1.0 / 3.0;
944 constexpr double ONE_SIXTH = 1.0 / 6.0;
945
946 static constexpr double dN_ref[6][3] = {
947 { -0.5, -0.5, -ONE_SIXTH },
948 { 0.5, 0.0, -ONE_SIXTH },
949 { 0.0, 0.5, -ONE_SIXTH },
950 { -0.5, -0.5, ONE_SIXTH },
951 { 0.5, 0.0, ONE_SIXTH },
952 { 0.0, 0.5, ONE_SIXTH } };
953
954 static constexpr int WEDGE_NODE_OFF[2][6][3] = {
955 { { 0, 0, 0 }, { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 }, { 1, 0, 1 }, { 0, 1, 1 } },
956 { { 1, 1, 0 }, { 0, 1, 0 }, { 1, 0, 0 }, { 1, 1, 1 }, { 0, 1, 1 }, { 1, 0, 1 } } };
957
958 const int n00 = node_id( tx, ty );
959 const int n01 = node_id( tx, ty + 1 );
960 const int n10 = node_id( tx + 1, ty );
961 const int n11 = node_id( tx + 1, ty + 1 );
962
963 {
964 const int lvl0 = tr;
965 const int r_cell = r0 + tr;
966
967 if ( r_cell >= hex_rad_ )
968 return;
969
970 const double r_0 = r_sh( lvl0 );
971 const double r_1 = r_sh( lvl0 + 1 );
972
973 const bool at_cmb = has_flag( local_subdomain_id, x_cell, y_cell, r_cell, CMB );
974 const bool at_surface = has_flag( local_subdomain_id, x_cell, y_cell, r_cell + 1, SURFACE );
975
976 const bool at_boundary = at_cmb || at_surface;
977 bool treat_boundary_dirichlet = false;
978 if ( at_boundary )
979 {
980 const ShellBoundaryFlag sbf = at_cmb ? CMB : SURFACE;
981 treat_boundary_dirichlet = ( get_boundary_condition_flag( bcs_, sbf ) == DIRICHLET );
982 }
983
984 const int cmb_shift = ( ( at_boundary && treat_boundary_dirichlet && ( !Diagonal ) && at_cmb ) ? 3 : 0 );
985 const int surface_shift =
986 ( ( at_boundary && treat_boundary_dirichlet && ( !Diagonal ) && at_surface ) ? 3 : 0 );
987
988 for ( int w = 0; w < 2; ++w )
989 {
990 const int v0 = w == 0 ? n00 : n11;
991 const int v1 = w == 0 ? n10 : n01;
992 const int v2 = w == 0 ? n01 : n10;
993
994 double k_sum = 0.0;
995#pragma unroll
996 for ( int node = 0; node < 6; ++node )
997 {
998 const int nid = node_id( tx + WEDGE_NODE_OFF[w][node][0], ty + WEDGE_NODE_OFF[w][node][1] );
999 k_sum += k_sh( nid, lvl0 + WEDGE_NODE_OFF[w][node][2] );
1000 }
1001 const double k_eval = ONE_SIXTH * k_sum;
1002
1003 double kwJ;
1004
1005 // ==== Phase 1: Jacobian + Gather (gu tensor) ====
1006 // invJ lives only in this scope so the compiler can reclaim its registers.
1007 double gu00 = 0.0;
1008 double gu10 = 0.0, gu11 = 0.0;
1009 double gu20 = 0.0, gu21 = 0.0, gu22 = 0.0;
1010 double div_u = 0.0;
1011 {
1012 const double half_dr = 0.5 * ( r_1 - r_0 );
1013 const double r_mid = 0.5 * ( r_0 + r_1 );
1014
1015 const double J_0_0 = r_mid * ( -coords_sh( v0, 0 ) + coords_sh( v1, 0 ) );
1016 const double J_0_1 = r_mid * ( -coords_sh( v0, 0 ) + coords_sh( v2, 0 ) );
1017 const double J_0_2 =
1018 half_dr * ( ONE_THIRD * ( coords_sh( v0, 0 ) + coords_sh( v1, 0 ) + coords_sh( v2, 0 ) ) );
1019
1020 const double J_1_0 = r_mid * ( -coords_sh( v0, 1 ) + coords_sh( v1, 1 ) );
1021 const double J_1_1 = r_mid * ( -coords_sh( v0, 1 ) + coords_sh( v2, 1 ) );
1022 const double J_1_2 =
1023 half_dr * ( ONE_THIRD * ( coords_sh( v0, 1 ) + coords_sh( v1, 1 ) + coords_sh( v2, 1 ) ) );
1024
1025 const double J_2_0 = r_mid * ( -coords_sh( v0, 2 ) + coords_sh( v1, 2 ) );
1026 const double J_2_1 = r_mid * ( -coords_sh( v0, 2 ) + coords_sh( v2, 2 ) );
1027 const double J_2_2 =
1028 half_dr * ( ONE_THIRD * ( coords_sh( v0, 2 ) + coords_sh( v1, 2 ) + coords_sh( v2, 2 ) ) );
1029
1030 const double J_det = J_0_0 * J_1_1 * J_2_2 - J_0_0 * J_1_2 * J_2_1 -
1031 J_0_1 * J_1_0 * J_2_2 + J_0_1 * J_1_2 * J_2_0 +
1032 J_0_2 * J_1_0 * J_2_1 - J_0_2 * J_1_1 * J_2_0;
1033
1034 kwJ = k_eval * Kokkos::abs( J_det );
1035
1036 const double inv_det = 1.0 / J_det;
1037
1038 const double i00 = inv_det * ( J_1_1 * J_2_2 - J_1_2 * J_2_1 );
1039 const double i01 = inv_det * ( -J_1_0 * J_2_2 + J_1_2 * J_2_0 );
1040 const double i02 = inv_det * ( J_1_0 * J_2_1 - J_1_1 * J_2_0 );
1041 const double i10 = inv_det * ( -J_0_1 * J_2_2 + J_0_2 * J_2_1 );
1042 const double i11 = inv_det * ( J_0_0 * J_2_2 - J_0_2 * J_2_0 );
1043 const double i12 = inv_det * ( -J_0_0 * J_2_1 + J_0_1 * J_2_0 );
1044 const double i20 = inv_det * ( J_0_1 * J_1_2 - J_0_2 * J_1_1 );
1045 const double i21 = inv_det * ( -J_0_0 * J_1_2 + J_0_2 * J_1_0 );
1046 const double i22 = inv_det * ( J_0_0 * J_1_1 - J_0_1 * J_1_0 );
1047
1048 if ( !Diagonal )
1049 {
1050#pragma unroll
1051 for ( int n = cmb_shift; n < 6 - surface_shift; ++n )
1052 {
1053 const double gx = dN_ref[n][0];
1054 const double gy = dN_ref[n][1];
1055 const double gz = dN_ref[n][2];
1056 const double g0 = i00 * gx + i01 * gy + i02 * gz;
1057 const double g1 = i10 * gx + i11 * gy + i12 * gz;
1058 const double g2 = i20 * gx + i21 * gy + i22 * gz;
1059
1060 const int ddx = WEDGE_NODE_OFF[w][n][0];
1061 const int ddy = WEDGE_NODE_OFF[w][n][1];
1062 const int ddr = WEDGE_NODE_OFF[w][n][2];
1063 const int nid = node_id( tx + ddx, ty + ddy );
1064 const int lvl = lvl0 + ddr;
1065
1066 const double s0 = src_sh( nid, 0, lvl );
1067 const double s1 = src_sh( nid, 1, lvl );
1068 const double s2 = src_sh( nid, 2, lvl );
1069
1070 gu00 += g0 * s0;
1071 gu11 += g1 * s1;
1072 gu22 += g2 * s2;
1073 gu10 += 0.5 * ( g1 * s0 + g0 * s1 );
1074 gu20 += 0.5 * ( g2 * s0 + g0 * s2 );
1075 gu21 += 0.5 * ( g2 * s1 + g1 * s2 );
1076 div_u += g0 * s0 + g1 * s1 + g2 * s2;
1077 }
1078 }
1079 }
1080 // invJ (i00..i22) is now out of scope — registers can be reclaimed.
1081
1082 // ==== Phase 2: Recompute Jacobian + Scatter ====
1083 {
1084 const double half_dr = 0.5 * ( r_1 - r_0 );
1085 const double r_mid = 0.5 * ( r_0 + r_1 );
1086
1087 const double J_0_0 = r_mid * ( -coords_sh( v0, 0 ) + coords_sh( v1, 0 ) );
1088 const double J_0_1 = r_mid * ( -coords_sh( v0, 0 ) + coords_sh( v2, 0 ) );
1089 const double J_0_2 =
1090 half_dr * ( ONE_THIRD * ( coords_sh( v0, 0 ) + coords_sh( v1, 0 ) + coords_sh( v2, 0 ) ) );
1091
1092 const double J_1_0 = r_mid * ( -coords_sh( v0, 1 ) + coords_sh( v1, 1 ) );
1093 const double J_1_1 = r_mid * ( -coords_sh( v0, 1 ) + coords_sh( v2, 1 ) );
1094 const double J_1_2 =
1095 half_dr * ( ONE_THIRD * ( coords_sh( v0, 1 ) + coords_sh( v1, 1 ) + coords_sh( v2, 1 ) ) );
1096
1097 const double J_2_0 = r_mid * ( -coords_sh( v0, 2 ) + coords_sh( v1, 2 ) );
1098 const double J_2_1 = r_mid * ( -coords_sh( v0, 2 ) + coords_sh( v2, 2 ) );
1099 const double J_2_2 =
1100 half_dr * ( ONE_THIRD * ( coords_sh( v0, 2 ) + coords_sh( v1, 2 ) + coords_sh( v2, 2 ) ) );
1101
1102 const double J_det = J_0_0 * J_1_1 * J_2_2 - J_0_0 * J_1_2 * J_2_1 -
1103 J_0_1 * J_1_0 * J_2_2 + J_0_1 * J_1_2 * J_2_0 +
1104 J_0_2 * J_1_0 * J_2_1 - J_0_2 * J_1_1 * J_2_0;
1105
1106 const double inv_det = 1.0 / J_det;
1107
1108 const double i00 = inv_det * ( J_1_1 * J_2_2 - J_1_2 * J_2_1 );
1109 const double i01 = inv_det * ( -J_1_0 * J_2_2 + J_1_2 * J_2_0 );
1110 const double i02 = inv_det * ( J_1_0 * J_2_1 - J_1_1 * J_2_0 );
1111 const double i10 = inv_det * ( -J_0_1 * J_2_2 + J_0_2 * J_2_1 );
1112 const double i11 = inv_det * ( J_0_0 * J_2_2 - J_0_2 * J_2_0 );
1113 const double i12 = inv_det * ( -J_0_0 * J_2_1 + J_0_1 * J_2_0 );
1114 const double i20 = inv_det * ( J_0_1 * J_1_2 - J_0_2 * J_1_1 );
1115 const double i21 = inv_det * ( -J_0_0 * J_1_2 + J_0_2 * J_1_0 );
1116 const double i22 = inv_det * ( J_0_0 * J_1_1 - J_0_1 * J_1_0 );
1117
1118 if ( !Diagonal )
1119 {
1120 constexpr double NEG_TWO_THIRDS = -0.66666666666666663;
1121#pragma unroll
1122 for ( int n = cmb_shift; n < 6 - surface_shift; ++n )
1123 {
1124 const double gx = dN_ref[n][0];
1125 const double gy = dN_ref[n][1];
1126 const double gz = dN_ref[n][2];
1127 const double g0 = i00 * gx + i01 * gy + i02 * gz;
1128 const double g1 = i10 * gx + i11 * gy + i12 * gz;
1129 const double g2 = i20 * gx + i21 * gy + i22 * gz;
1130
1131 const int ddx = WEDGE_NODE_OFF[w][n][0];
1132 const int ddy = WEDGE_NODE_OFF[w][n][1];
1133 const int ddr = WEDGE_NODE_OFF[w][n][2];
1134 Kokkos::atomic_add(
1135 &dst_( local_subdomain_id, x_cell + ddx, y_cell + ddy, r_cell + ddr, 0 ),
1136 kwJ * ( 2.0 * ( g0 * gu00 + g1 * gu10 + g2 * gu20 ) +
1137 NEG_TWO_THIRDS * g0 * div_u ) );
1138 Kokkos::atomic_add(
1139 &dst_( local_subdomain_id, x_cell + ddx, y_cell + ddy, r_cell + ddr, 1 ),
1140 kwJ * ( 2.0 * ( g0 * gu10 + g1 * gu11 + g2 * gu21 ) +
1141 NEG_TWO_THIRDS * g1 * div_u ) );
1142 Kokkos::atomic_add(
1143 &dst_( local_subdomain_id, x_cell + ddx, y_cell + ddy, r_cell + ddr, 2 ),
1144 kwJ * ( 2.0 * ( g0 * gu20 + g1 * gu21 + g2 * gu22 ) +
1145 NEG_TWO_THIRDS * g2 * div_u ) );
1146 }
1147 }
1148
1149 if ( Diagonal || ( treat_boundary_dirichlet && at_boundary ) )
1150 {
1151#pragma unroll
1152 for ( int n = surface_shift; n < 6 - cmb_shift; ++n )
1153 {
1154 const double gx = dN_ref[n][0];
1155 const double gy = dN_ref[n][1];
1156 const double gz = dN_ref[n][2];
1157 const double g0 = i00 * gx + i01 * gy + i02 * gz;
1158 const double g1 = i10 * gx + i11 * gy + i12 * gz;
1159 const double g2 = i20 * gx + i21 * gy + i22 * gz;
1160 const double gg = g0 * g0 + g1 * g1 + g2 * g2;
1161
1162 const int nid = node_id( tx + WEDGE_NODE_OFF[w][n][0], ty + WEDGE_NODE_OFF[w][n][1] );
1163 const int lvl = lvl0 + WEDGE_NODE_OFF[w][n][2];
1164
1165 const double sv0 = src_sh( nid, 0, lvl );
1166 const double sv1 = src_sh( nid, 1, lvl );
1167 const double sv2 = src_sh( nid, 2, lvl );
1168
1169 const int ddx = WEDGE_NODE_OFF[w][n][0];
1170 const int ddy = WEDGE_NODE_OFF[w][n][1];
1171 const int ddr = WEDGE_NODE_OFF[w][n][2];
1172 Kokkos::atomic_add(
1173 &dst_( local_subdomain_id, x_cell + ddx, y_cell + ddy, r_cell + ddr, 0 ),
1174 kwJ * sv0 * ( gg + ONE_THIRD * g0 * g0 ) );
1175 Kokkos::atomic_add(
1176 &dst_( local_subdomain_id, x_cell + ddx, y_cell + ddy, r_cell + ddr, 1 ),
1177 kwJ * sv1 * ( gg + ONE_THIRD * g1 * g1 ) );
1178 Kokkos::atomic_add(
1179 &dst_( local_subdomain_id, x_cell + ddx, y_cell + ddy, r_cell + ddr, 2 ),
1180 kwJ * sv2 * ( gg + ONE_THIRD * g2 * g2 ) );
1181 }
1182 }
1183 }
1184
1185 } // end wedge loop
1186 }
1187 }
1188
1189 // ===================== FAST FREESLIP PATH =====================
1190 KOKKOS_INLINE_FUNCTION
1191 void normalize3( double& x, double& y, double& z ) const
1192 {
1193 const double n2 = x * x + y * y + z * z;
1194 if ( n2 > 0.0 )
1195 {
1196 const double invn = 1.0 / Kokkos::sqrt( n2 );
1197 x *= invn;
1198 y *= invn;
1199 z *= invn;
1200 }
1201 }
1202
1203 KOKKOS_INLINE_FUNCTION
1204 void project_tangential_inplace(
1205 const double nx,
1206 const double ny,
1207 const double nz,
1208 double& ux,
1209 double& uy,
1210 double& uz ) const
1211 {
1212 const double dot = nx * ux + ny * uy + nz * uz;
1213 ux -= dot * nx;
1214 uy -= dot * ny;
1215 uz -= dot * nz;
1216 }
1217
/**
 * @brief Matrix-free team kernel for one hex cell on the fast free-slip path.
 *
 * Stages node coordinates, unit normals, the coefficient field and the source
 * vector for the whole team tile into scratch (shared) memory, then processes
 * the two wedges of the hex cell: the trial-side symmetric gradient of u is
 * accumulated with an inline tangential projection at free-slip boundary
 * nodes, the test-side contributions (with merged Ann accumulation) are
 * collected into the per-cell `dst8` buffer, the test-side projection and the
 * Ann normal correction are applied, and finally the eight unique node
 * contributions are scattered to global memory with atomic adds.
 *
 * @tparam Diagonal If true, only diagonal contributions are assembled
 *                  (including the rotated-diagonal treatment at free-slip
 *                  nodes); if false, the full operator apply is performed.
 */
template < bool Diagonal >
KOKKOS_INLINE_FUNCTION
void operator_fast_freeslip_path(
    const Team& team,
    const int local_subdomain_id,
    const int x0,
    const int y0,
    const int r0,
    const int tx,
    const int ty,
    const int tr,
    const int x_cell,
    const int y_cell,
    const int r_cell,
    const bool at_cmb,
    const bool at_surface ) const
{
    // Scratch tile extents: radial levels and lateral nodes covered by this team.
    const int nlev = r_tile_ + 1;
    const int nxy = ( lat_tile_ + 1 ) * ( lat_tile_ + 1 );

    // Carve the team's scratch allocation into the views below by hand.
    double* shmem =
        reinterpret_cast< double* >( team.team_shmem().get_shmem( team_shmem_size( team.team_size() ) ) );

    using ScratchCoords =
        Kokkos::View< double**, Kokkos::LayoutRight, typename Team::scratch_memory_space, Kokkos::MemoryUnmanaged >;
    using ScratchSrc = Kokkos::
        View< double***, Kokkos::LayoutRight, typename Team::scratch_memory_space, Kokkos::MemoryUnmanaged >;
    using ScratchK =
        Kokkos::View< double**, Kokkos::LayoutRight, typename Team::scratch_memory_space, Kokkos::MemoryUnmanaged >;

    ScratchCoords coords_sh( shmem, nxy, 3 );
    shmem += nxy * 3;

    // Normalized normals in shared memory — avoids 12 persistent register-doubles.
    ScratchCoords normals_sh( shmem, nxy, 3 );
    shmem += nxy * 3;

    ScratchSrc src_sh( shmem, nxy, 3, nlev );
    shmem += nxy * 3 * nlev;

    ScratchK k_sh( shmem, nxy, nlev );
    shmem += nxy * nlev;

    auto r_sh =
        Kokkos::View< double*, Kokkos::LayoutRight, typename Team::scratch_memory_space, Kokkos::MemoryUnmanaged >(
            shmem, nlev );

    // Linearize a (nx, ny) lateral node index into the scratch-tile node id.
    auto node_id = [&]( int nx, int ny ) -> int { return nx + ( lat_tile_ + 1 ) * ny; };

    // Preload coords + compute normalized normals cooperatively.
    // Out-of-range tile nodes are zero-filled so later reads are harmless.
    Kokkos::parallel_for( Kokkos::TeamThreadRange( team, nxy ), [&]( int n ) {
        const int dxn = n % ( lat_tile_ + 1 );
        const int dyn = n / ( lat_tile_ + 1 );
        const int xi = x0 + dxn;
        const int yi = y0 + dyn;

        if ( xi <= hex_lat_ && yi <= hex_lat_ )
        {
            const double cx = grid_( local_subdomain_id, xi, yi, 0 );
            const double cy = grid_( local_subdomain_id, xi, yi, 1 );
            const double cz = grid_( local_subdomain_id, xi, yi, 2 );
            coords_sh( n, 0 ) = cx;
            coords_sh( n, 1 ) = cy;
            coords_sh( n, 2 ) = cz;

            // On the unit-sphere shell the radial normal is the normalized
            // node coordinate itself.
            const double n2 = cx * cx + cy * cy + cz * cz;
            if ( n2 > 0.0 )
            {
                const double invn = 1.0 / Kokkos::sqrt( n2 );
                normals_sh( n, 0 ) = cx * invn;
                normals_sh( n, 1 ) = cy * invn;
                normals_sh( n, 2 ) = cz * invn;
            }
            else
            {
                normals_sh( n, 0 ) = normals_sh( n, 1 ) = normals_sh( n, 2 ) = 0.0;
            }
        }
        else
        {
            coords_sh( n, 0 ) = coords_sh( n, 1 ) = coords_sh( n, 2 ) = 0.0;
            normals_sh( n, 0 ) = normals_sh( n, 1 ) = normals_sh( n, 2 ) = 0.0;
        }
    } );

    // Preload the radial coordinates of the tile's levels.
    Kokkos::parallel_for( Kokkos::TeamThreadRange( team, nlev ), [&]( int lvl ) {
        const int rr = r0 + lvl;
        r_sh( lvl ) = ( rr <= hex_rad_ ) ? radii_( local_subdomain_id, rr ) : 0.0;
    } );

    // Preload coefficient and source values for every (node, level) pair.
    const int total_pairs = nxy * nlev;
    Kokkos::parallel_for( Kokkos::TeamThreadRange( team, total_pairs ), [&]( int t ) {
        const int node = t / nlev;
        const int lvl = t - node * nlev;

        const int dxn = node % ( lat_tile_ + 1 );
        const int dyn = node / ( lat_tile_ + 1 );

        const int xi = x0 + dxn;
        const int yi = y0 + dyn;
        const int rr = r0 + lvl;

        if ( xi <= hex_lat_ && yi <= hex_lat_ && rr <= hex_rad_ )
        {
            k_sh( node, lvl ) = k_( local_subdomain_id, xi, yi, rr );
            src_sh( node, 0, lvl ) = src_( local_subdomain_id, xi, yi, rr, 0 );
            src_sh( node, 1, lvl ) = src_( local_subdomain_id, xi, yi, rr, 1 );
            src_sh( node, 2, lvl ) = src_( local_subdomain_id, xi, yi, rr, 2 );
        }
        else
        {
            k_sh( node, lvl ) = 0.0;
            src_sh( node, 0, lvl ) = src_sh( node, 1, lvl ) = src_sh( node, 2, lvl ) = 0.0;
        }
    } );

    // All threads must see the fully populated scratch tile before computing.
    team.team_barrier();

    // Threads whose cell lies outside the subdomain only helped with staging.
    if ( x_cell >= hex_lat_ || y_cell >= hex_lat_ || r_cell >= hex_rad_ )
        return;

    const int lvl0 = tr;
    const double r_0 = r_sh( lvl0 );
    const double r_1 = r_sh( lvl0 + 1 );

    const BoundaryConditionFlag cmb_bc = get_boundary_condition_flag( bcs_, CMB );
    const BoundaryConditionFlag surface_bc = get_boundary_condition_flag( bcs_, SURFACE );

    const bool cmb_freeslip = at_cmb && ( cmb_bc == FREESLIP );
    const bool surf_freeslip = at_surface && ( surface_bc == FREESLIP );
    const bool cmb_dirichlet = at_cmb && ( cmb_bc == DIRICHLET );
    const bool surface_dirichlet = at_surface && ( surface_bc == DIRICHLET );

    // Dirichlet (non-diagonal) skips the constrained boundary layer of wedge
    // nodes: nodes 0..2 are the lower (CMB-side) triangle, 3..5 the upper.
    const int cmb_shift = ( ( cmb_dirichlet && ( !Diagonal ) ) ? 3 : 0 );
    const int surface_shift = ( ( surface_dirichlet && ( !Diagonal ) ) ? 3 : 0 );

    // Per-wedge node offsets (dx, dy, dr) within the hex cell.
    static constexpr int WEDGE_NODE_OFF[2][6][3] = {
        { { 0, 0, 0 }, { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 }, { 1, 0, 1 }, { 0, 1, 1 } },
        { { 1, 1, 0 }, { 0, 1, 0 }, { 1, 0, 0 }, { 1, 1, 1 }, { 0, 1, 1 }, { 1, 0, 1 } } };

    // Maps each wedge-local node to one of the hex cell's 8 unique nodes.
    static constexpr int WEDGE_TO_UNIQUE[2][6] = {
        { 0, 1, 2, 3, 4, 5 },
        { 6, 2, 1, 7, 5, 4 }
    };

    constexpr double ONE_THIRD = 1.0 / 3.0;
    constexpr double ONE_SIXTH = 1.0 / 6.0;
    constexpr double NEG_TWO_THIRDS = -0.66666666666666663;

    // Reference-element shape-function gradients of the wedge (constant here).
    static constexpr double dN_ref[6][3] = {
        { -0.5, -0.5, -ONE_SIXTH },
        { 0.5, 0.0, -ONE_SIXTH },
        { 0.0, 0.5, -ONE_SIXTH },
        { -0.5, -0.5, ONE_SIXTH },
        { 0.5, 0.0, ONE_SIXTH },
        { 0.0, 0.5, ONE_SIXTH } };

    const int n00 = node_id( tx, ty );
    const int n01 = node_id( tx, ty + 1 );
    const int n10 = node_id( tx + 1, ty );
    const int n11 = node_id( tx + 1, ty + 1 );

    // Corner-to-shared-memory-node mapping: 0→n00, 1→n10, 2→n01, 3→n11.
    const int corner_node[4] = { n00, n10, n01, n11 };
    // Corner-to-unique-node for CMB (r=0) and surface (r=1) layers.
    static constexpr int CMB_CORNER_TO_UNIQUE[4] = { 0, 1, 2, 6 };
    static constexpr int SURF_CORNER_TO_UNIQUE[4] = { 3, 4, 5, 7 };

    // Per-cell accumulator: 3 components x 8 unique hex nodes.
    double dst8[3][8] = { { 0.0 } };

    // Scalar Ann accumulators per corner. Accumulated inside the test-side loop
    // (merged from the former separate Ann loop to reuse already-computed gradients).
    double Ann_acc_cmb[4] = {};
    double Ann_acc_surf[4] = {};

    for ( int w = 0; w < 2; ++w )
    {
        // Triangle vertices; the second wedge is traversed with swapped
        // orientation so both share the cell diagonal.
        const int v0 = w == 0 ? n00 : n11;
        const int v1 = w == 0 ? n10 : n01;
        const int v2 = w == 0 ? n01 : n10;

        // Coefficient evaluated at the wedge centroid = mean of its 6 nodes.
        double k_sum = 0.0;
#pragma unroll
        for ( int node = 0; node < 6; ++node )
        {
            const int ddx = WEDGE_NODE_OFF[w][node][0];
            const int ddy = WEDGE_NODE_OFF[w][node][1];
            const int ddr = WEDGE_NODE_OFF[w][node][2];

            const int nid = node_id( tx + ddx, ty + ddy );
            const int lvl = lvl0 + ddr;
            k_sum += k_sh( nid, lvl );
        }
        const double k_eval = ONE_SIXTH * k_sum;

        double wJ = 0.0;
        double i00, i01, i02;
        double i10, i11, i12;
        double i20, i21, i22;

        // Jacobian at the wedge midpoint and its inverse (i00..i22 = J^{-1}).
        {
            const double half_dr = 0.5 * ( r_1 - r_0 );
            const double r_mid = 0.5 * ( r_0 + r_1 );

            const double J_0_0 = r_mid * ( -coords_sh( v0, 0 ) + coords_sh( v1, 0 ) );
            const double J_0_1 = r_mid * ( -coords_sh( v0, 0 ) + coords_sh( v2, 0 ) );
            const double J_0_2 = half_dr * ( ONE_THIRD * ( coords_sh( v0, 0 ) + coords_sh( v1, 0 ) + coords_sh( v2, 0 ) ) );

            const double J_1_0 = r_mid * ( -coords_sh( v0, 1 ) + coords_sh( v1, 1 ) );
            const double J_1_1 = r_mid * ( -coords_sh( v0, 1 ) + coords_sh( v2, 1 ) );
            const double J_1_2 = half_dr * ( ONE_THIRD * ( coords_sh( v0, 1 ) + coords_sh( v1, 1 ) + coords_sh( v2, 1 ) ) );

            const double J_2_0 = r_mid * ( -coords_sh( v0, 2 ) + coords_sh( v1, 2 ) );
            const double J_2_1 = r_mid * ( -coords_sh( v0, 2 ) + coords_sh( v2, 2 ) );
            const double J_2_2 = half_dr * ( ONE_THIRD * ( coords_sh( v0, 2 ) + coords_sh( v1, 2 ) + coords_sh( v2, 2 ) ) );

            const double J_det = J_0_0 * J_1_1 * J_2_2 - J_0_0 * J_1_2 * J_2_1 - J_0_1 * J_1_0 * J_2_2 +
                                 J_0_1 * J_1_2 * J_2_0 + J_0_2 * J_1_0 * J_2_1 - J_0_2 * J_1_1 * J_2_0;

            const double invJ = 1.0 / J_det;

            i00 = invJ * ( J_1_1 * J_2_2 - J_1_2 * J_2_1 );
            i01 = invJ * ( -J_1_0 * J_2_2 + J_1_2 * J_2_0 );
            i02 = invJ * ( J_1_0 * J_2_1 - J_1_1 * J_2_0 );

            i10 = invJ * ( -J_0_1 * J_2_2 + J_0_2 * J_2_1 );
            i11 = invJ * ( J_0_0 * J_2_2 - J_0_2 * J_2_0 );
            i12 = invJ * ( -J_0_0 * J_2_1 + J_0_1 * J_2_0 );

            i20 = invJ * ( J_0_1 * J_1_2 - J_0_2 * J_1_1 );
            i21 = invJ * ( -J_0_0 * J_1_2 + J_0_2 * J_1_0 );
            i22 = invJ * ( J_0_0 * J_1_1 - J_0_1 * J_1_0 );

            wJ = Kokkos::abs( J_det );
        }

        const double kwJ = k_eval * wJ;

        // ---- Fused trial + test side with merged Ann accumulation ----
        static constexpr int CMB_NODE_TO_CORNER[2][3] = { { 0, 1, 2 }, { 3, 2, 1 } };

        // Symmetric gradient of u (gu**) and its trace (div_u) at this wedge.
        double gu00 = 0.0;
        double gu10 = 0.0, gu11 = 0.0;
        double gu20 = 0.0, gu21 = 0.0, gu22 = 0.0;
        double div_u = 0.0;

        if ( !Diagonal )
        {
            // Trial side: accumulate symmetric gradient of u (fused dim loops).
            // Read directly from shared memory with inline tangential projection for freeslip nodes.
#pragma unroll
            for ( int n = cmb_shift; n < 6 - surface_shift; ++n )
            {
                const double gx = dN_ref[n][0];
                const double gy = dN_ref[n][1];
                const double gz = dN_ref[n][2];
                const double g0 = i00 * gx + i01 * gy + i02 * gz;
                const double g1 = i10 * gx + i11 * gy + i12 * gz;
                const double g2 = i20 * gx + i21 * gy + i22 * gz;

                const int nid = node_id( tx + WEDGE_NODE_OFF[w][n][0], ty + WEDGE_NODE_OFF[w][n][1] );
                const int lvl = lvl0 + WEDGE_NODE_OFF[w][n][2];

                double s0 = src_sh( nid, 0, lvl );
                double s1 = src_sh( nid, 1, lvl );
                double s2 = src_sh( nid, 2, lvl );

                // Inline tangential projection for freeslip boundary nodes.
                if ( cmb_freeslip && n < 3 )
                {
                    const double nx = normals_sh( nid, 0 );
                    const double ny = normals_sh( nid, 1 );
                    const double nz = normals_sh( nid, 2 );
                    const double dot = nx * s0 + ny * s1 + nz * s2;
                    s0 -= dot * nx;
                    s1 -= dot * ny;
                    s2 -= dot * nz;
                }
                if ( surf_freeslip && n >= 3 )
                {
                    const double nx = normals_sh( nid, 0 );
                    const double ny = normals_sh( nid, 1 );
                    const double nz = normals_sh( nid, 2 );
                    const double dot = nx * s0 + ny * s1 + nz * s2;
                    s0 -= dot * nx;
                    s1 -= dot * ny;
                    s2 -= dot * nz;
                }

                gu00 += g0 * s0;
                gu11 += g1 * s1;
                gu22 += g2 * s2;
                gu10 += 0.5 * ( g1 * s0 + g0 * s1 );
                gu20 += 0.5 * ( g2 * s0 + g0 * s2 );
                gu21 += 0.5 * ( g2 * s1 + g1 * s2 );
                div_u += g0 * s0 + g1 * s1 + g2 * s2;
            }

            // Test side + merged Ann accumulation.
            // Ann uses the same gradient already computed — no separate loop needed.
#pragma unroll
            for ( int n = cmb_shift; n < 6 - surface_shift; ++n )
            {
                const double gx = dN_ref[n][0];
                const double gy = dN_ref[n][1];
                const double gz = dN_ref[n][2];
                const double g0 = i00 * gx + i01 * gy + i02 * gz;
                const double g1 = i10 * gx + i11 * gy + i12 * gz;
                const double g2 = i20 * gx + i21 * gy + i22 * gz;

                const int uid = WEDGE_TO_UNIQUE[w][n];
                dst8[0][uid] +=
                    kwJ * ( 2.0 * ( g0 * gu00 + g1 * gu10 + g2 * gu20 ) + NEG_TWO_THIRDS * g0 * div_u );
                dst8[1][uid] +=
                    kwJ * ( 2.0 * ( g0 * gu10 + g1 * gu11 + g2 * gu21 ) + NEG_TWO_THIRDS * g1 * div_u );
                dst8[2][uid] +=
                    kwJ * ( 2.0 * ( g0 * gu20 + g1 * gu21 + g2 * gu22 ) + NEG_TWO_THIRDS * g2 * div_u );

                // Accumulate Ann for freeslip CMB nodes (n < 3) and surface nodes (n >= 3).
                if ( cmb_freeslip && n < 3 )
                {
                    const int corner = CMB_NODE_TO_CORNER[w][n];
                    const int cn = corner_node[corner];
                    const double nxu = normals_sh( cn, 0 );
                    const double nyu = normals_sh( cn, 1 );
                    const double nzu = normals_sh( cn, 2 );
                    const double gg = g0 * g0 + g1 * g1 + g2 * g2;
                    const double ng = nxu * g0 + nyu * g1 + nzu * g2;
                    Ann_acc_cmb[corner] += kwJ * ( gg + ONE_THIRD * ng * ng );
                }
                if ( surf_freeslip && n >= 3 )
                {
                    const int corner = CMB_NODE_TO_CORNER[w][n - 3];
                    const int cn = corner_node[corner];
                    const double nxu = normals_sh( cn, 0 );
                    const double nyu = normals_sh( cn, 1 );
                    const double nzu = normals_sh( cn, 2 );
                    const double gg = g0 * g0 + g1 * g1 + g2 * g2;
                    const double ng = nxu * g0 + nyu * g1 + nzu * g2;
                    Ann_acc_surf[corner] += kwJ * ( gg + ONE_THIRD * ng * ng );
                }
            }
        }

        // ---- Diagonal / Dirichlet boundary handling (fused) ----
        if ( Diagonal || cmb_dirichlet || surface_dirichlet )
        {
            // Fused diagonal: kwJ * s_d * (|g|^2 + (1/3) * g_d^2)
#pragma unroll
            for ( int n = surface_shift; n < 6 - cmb_shift; ++n )
            {
                // Freeslip boundary nodes get the rotated-diagonal treatment below.
                if ( Diagonal && cmb_freeslip && n < 3 )
                    continue;
                if ( Diagonal && surf_freeslip && n >= 3 )
                    continue;

                const double gx = dN_ref[n][0];
                const double gy = dN_ref[n][1];
                const double gz = dN_ref[n][2];
                const double g0 = i00 * gx + i01 * gy + i02 * gz;
                const double g1 = i10 * gx + i11 * gy + i12 * gz;
                const double g2 = i20 * gx + i21 * gy + i22 * gz;
                const double gg = g0 * g0 + g1 * g1 + g2 * g2;

                const int uid = WEDGE_TO_UNIQUE[w][n];
                const int nid = node_id( tx + WEDGE_NODE_OFF[w][n][0], ty + WEDGE_NODE_OFF[w][n][1] );
                const int lvl = lvl0 + WEDGE_NODE_OFF[w][n][2];
                const double s0 = src_sh( nid, 0, lvl );
                const double s1 = src_sh( nid, 1, lvl );
                const double s2 = src_sh( nid, 2, lvl );

                dst8[0][uid] += kwJ * s0 * ( gg + ONE_THIRD * g0 * g0 );
                dst8[1][uid] += kwJ * s1 * ( gg + ONE_THIRD * g1 * g1 );
                dst8[2][uid] += kwJ * s2 * ( gg + ONE_THIRD * g2 * g2 );
            }

            // For free-slip boundary nodes in diagonal mode: compute R^T diag(R A_3x3 R^T) R src.
            // Normals loaded from shared memory, u_n recomputed from src_sh (original, unprojected).
            if ( Diagonal )
            {
                static constexpr int FS_CORNER_MAP[2][3] = { { 0, 1, 2 }, { 3, 2, 1 } };

                auto apply_rotated_diag =
                    [&]( const int ni, const int node_idx, const int src_lvl ) {
                        const int corner = FS_CORNER_MAP[w][ni];
                        const int cn = corner_node[corner];
                        const double nxu = normals_sh( cn, 0 );
                        const double nyu = normals_sh( cn, 1 );
                        const double nzu = normals_sh( cn, 2 );
                        const int u = WEDGE_TO_UNIQUE[w][node_idx];

                        const double gx = dN_ref[node_idx][0];
                        const double gy = dN_ref[node_idx][1];
                        const double gz = dN_ref[node_idx][2];

                        const double g0 = i00 * gx + i01 * gy + i02 * gz;
                        const double g1 = i10 * gx + i11 * gy + i12 * gz;
                        const double g2 = i20 * gx + i21 * gy + i22 * gz;
                        const double gg_loc = g0 * g0 + g1 * g1 + g2 * g2;

                        dense::Vec< double, 3 > n_vec;
                        n_vec( 0 ) = nxu;
                        n_vec( 1 ) = nyu;
                        n_vec( 2 ) = nzu;
                        const auto R_rot = trafo_mat_cartesian_to_normal_tangential< double >( n_vec );

                        // Read original (unprojected) src directly from shared memory.
                        // (Previously: projected src8 + u_n*n = original, now simplified.)
                        const double s0 = src_sh( cn, 0, src_lvl );
                        const double s1 = src_sh( cn, 1, src_lvl );
                        const double s2 = src_sh( cn, 2, src_lvl );

                        const double Rg0 = R_rot( 0, 0 ) * g0 + R_rot( 0, 1 ) * g1 + R_rot( 0, 2 ) * g2;
                        const double Rg1 = R_rot( 1, 0 ) * g0 + R_rot( 1, 1 ) * g1 + R_rot( 1, 2 ) * g2;
                        const double Rg2 = R_rot( 2, 0 ) * g0 + R_rot( 2, 1 ) * g1 + R_rot( 2, 2 ) * g2;
                        const double Rs0 = R_rot( 0, 0 ) * s0 + R_rot( 0, 1 ) * s1 + R_rot( 0, 2 ) * s2;
                        const double Rs1 = R_rot( 1, 0 ) * s0 + R_rot( 1, 1 ) * s1 + R_rot( 1, 2 ) * s2;
                        const double Rs2 = R_rot( 2, 0 ) * s0 + R_rot( 2, 1 ) * s1 + R_rot( 2, 2 ) * s2;

                        // Diagonal entries in the rotated frame, then rotate back (R^T v).
                        const double v0 = kwJ * ( gg_loc + ONE_THIRD * Rg0 * Rg0 ) * Rs0;
                        const double v1 = kwJ * ( gg_loc + ONE_THIRD * Rg1 * Rg1 ) * Rs1;
                        const double v2 = kwJ * ( gg_loc + ONE_THIRD * Rg2 * Rg2 ) * Rs2;

                        dst8[0][u] += R_rot( 0, 0 ) * v0 + R_rot( 1, 0 ) * v1 + R_rot( 2, 0 ) * v2;
                        dst8[1][u] += R_rot( 0, 1 ) * v0 + R_rot( 1, 1 ) * v1 + R_rot( 2, 1 ) * v2;
                        dst8[2][u] += R_rot( 0, 2 ) * v0 + R_rot( 1, 2 ) * v1 + R_rot( 2, 2 ) * v2;
                    };

                if ( cmb_freeslip )
                {
                    for ( int ni = 0; ni < 3; ++ni )
                        apply_rotated_diag( ni, ni, lvl0 );
                }
                if ( surf_freeslip )
                {
                    for ( int ni = 0; ni < 3; ++ni )
                        apply_rotated_diag( ni, ni + 3, lvl0 + 1 );
                }
            }
        }
    }

    // Test-side projection for free-slip (P A P) — normals loaded from shared memory.
    if ( !Diagonal && cmb_freeslip )
    {
        for ( int c = 0; c < 4; ++c )
        {
            const double nx = normals_sh( corner_node[c], 0 );
            const double ny = normals_sh( corner_node[c], 1 );
            const double nz = normals_sh( corner_node[c], 2 );
            const int u = CMB_CORNER_TO_UNIQUE[c];
            const double dot = nx * dst8[0][u] + ny * dst8[1][u] + nz * dst8[2][u];
            dst8[0][u] -= dot * nx;
            dst8[1][u] -= dot * ny;
            dst8[2][u] -= dot * nz;
        }
    }
    if ( !Diagonal && surf_freeslip )
    {
        for ( int c = 0; c < 4; ++c )
        {
            const double nx = normals_sh( corner_node[c], 0 );
            const double ny = normals_sh( corner_node[c], 1 );
            const double nz = normals_sh( corner_node[c], 2 );
            const int u = SURF_CORNER_TO_UNIQUE[c];
            const double dot = nx * dst8[0][u] + ny * dst8[1][u] + nz * dst8[2][u];
            dst8[0][u] -= dot * nx;
            dst8[1][u] -= dot * ny;
            dst8[2][u] -= dot * nz;
        }
    }

    // Add back normal correction: Ann_acc[c] * u_n[c] * n_c.
    // u_n recomputed from original (unprojected) src in shared memory.
    if ( !Diagonal && cmb_freeslip )
    {
        for ( int c = 0; c < 4; ++c )
        {
            const int cn = corner_node[c];
            const double nx = normals_sh( cn, 0 );
            const double ny = normals_sh( cn, 1 );
            const double nz = normals_sh( cn, 2 );
            const double os0 = src_sh( cn, 0, lvl0 );
            const double os1 = src_sh( cn, 1, lvl0 );
            const double os2 = src_sh( cn, 2, lvl0 );
            const double u_n_val = nx * os0 + ny * os1 + nz * os2;
            const double corr = Ann_acc_cmb[c] * u_n_val;
            const int u = CMB_CORNER_TO_UNIQUE[c];
            dst8[0][u] += corr * nx;
            dst8[1][u] += corr * ny;
            dst8[2][u] += corr * nz;
        }
    }
    if ( !Diagonal && surf_freeslip )
    {
        for ( int c = 0; c < 4; ++c )
        {
            const int cn = corner_node[c];
            const double nx = normals_sh( cn, 0 );
            const double ny = normals_sh( cn, 1 );
            const double nz = normals_sh( cn, 2 );
            const double os0 = src_sh( cn, 0, lvl0 + 1 );
            const double os1 = src_sh( cn, 1, lvl0 + 1 );
            const double os2 = src_sh( cn, 2, lvl0 + 1 );
            const double u_n_val = nx * os0 + ny * os1 + nz * os2;
            const double corr = Ann_acc_surf[c] * u_n_val;
            const int u = SURF_CORNER_TO_UNIQUE[c];
            dst8[0][u] += corr * nx;
            dst8[1][u] += corr * ny;
            dst8[2][u] += corr * nz;
        }
    }

    // Scatter accumulated hex-cell contributions to global memory.
    // Atomics are required: neighboring cells write to shared nodes.
#pragma unroll
    for ( int dim_add = 0; dim_add < 3; ++dim_add )
    {
        Kokkos::atomic_add( &dst_( local_subdomain_id, x_cell, y_cell, r_cell, dim_add ), dst8[dim_add][0] );
        Kokkos::atomic_add( &dst_( local_subdomain_id, x_cell + 1, y_cell, r_cell, dim_add ), dst8[dim_add][1] );
        Kokkos::atomic_add( &dst_( local_subdomain_id, x_cell, y_cell + 1, r_cell, dim_add ), dst8[dim_add][2] );
        Kokkos::atomic_add( &dst_( local_subdomain_id, x_cell, y_cell, r_cell + 1, dim_add ), dst8[dim_add][3] );
        Kokkos::atomic_add(
            &dst_( local_subdomain_id, x_cell + 1, y_cell, r_cell + 1, dim_add ), dst8[dim_add][4] );
        Kokkos::atomic_add(
            &dst_( local_subdomain_id, x_cell, y_cell + 1, r_cell + 1, dim_add ), dst8[dim_add][5] );
        Kokkos::atomic_add(
            &dst_( local_subdomain_id, x_cell + 1, y_cell + 1, r_cell, dim_add ), dst8[dim_add][6] );
        Kokkos::atomic_add(
            &dst_( local_subdomain_id, x_cell + 1, y_cell + 1, r_cell + 1, dim_add ), dst8[dim_add][7] );
    }
}
1749
1750 public:
1751 /**
1752 * @brief Legacy generic team operator.
1753 *
1754 * Kept for compatibility/debugging, but no longer used by apply_impl().
1755 * The host now dispatches directly to path-specific kernels.
1756 *
1757 * This function still works, but it reintroduces a branch on `kernel_path_`
1758 * and should therefore be avoided in performance-critical use.
1759 */
1760 KOKKOS_INLINE_FUNCTION
1761 void operator()( const Team& team ) const
1762 {
1763 int local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell, r_cell;
1764 decode_team_indices( team, local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell, r_cell );
1765
1766 if ( tr >= r_tile_ )
1767 return;
1768
1769 const bool at_cmb = has_flag( local_subdomain_id, x_cell, y_cell, r_cell, CMB );
1770 const bool at_surface = has_flag( local_subdomain_id, x_cell, y_cell, r_cell + 1, SURFACE );
1771
1772 if ( kernel_path_ == KernelPath::Slow )
1773 {
1774 operator_slow_path(
1775 team, local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell, r_cell, at_cmb, at_surface );
1776 }
1777 else if ( kernel_path_ == KernelPath::FastFreeslip )
1778 {
1779 if ( diagonal_ )
1780 operator_fast_freeslip_path< true >(
1781 team, local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell, r_cell, at_cmb, at_surface );
1782 else
1783 operator_fast_freeslip_path< false >(
1784 team, local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell, r_cell, at_cmb, at_surface );
1785 }
1786 else
1787 {
1788 if ( diagonal_ )
1789 operator_fast_dirichlet_neumann_path< true >(
1790 team, local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell );
1791 else
1792 operator_fast_dirichlet_neumann_path< false >(
1793 team, local_subdomain_id, x0, y0, r0, tx, ty, tr, x_cell, y_cell );
1794 }
1795 }
1796
/// @brief Evaluate, at one quadrature point of one wedge, the ingredients of
/// the slow-path local-matrix assembly: the symmetric shape-function
/// gradients for the row component (dimi) and column component (dimj), and
/// the combined scalar weight w_q * k(q) * |det J|.
///
/// NOTE(review): the parameter list below appears to have lost lines in
/// extraction between `dimj` and `jdet_keval_quadweight` — presumably the
/// output arrays `sym_grad_i` / `sym_grad_j` written in the second loop.
/// Confirm against the original header before relying on this signature.
KOKKOS_INLINE_FUNCTION void assemble_trial_test_vecs(
    const int wedge,
    const dense::Vec< ScalarType, VecDim >& quad_point,
    const ScalarType quad_weight,
    const ScalarT r_1,
    const ScalarT r_2,
    dense::Vec< ScalarT, 3 > ( *wedge_phy_surf )[3],
    const dense::Vec< ScalarT, 6 >* k_local_hex,
    const int dimi,
    const int dimj,
    ScalarType& jdet_keval_quadweight ) const
{
    // Jacobian of the reference-to-physical map for this wedge at quad_point.
    dense::Mat< ScalarType, VecDim, VecDim > J = jac( wedge_phy_surf[wedge], r_1, r_2, quad_point );
    const auto det = J.det();
    const auto abs_det = Kokkos::abs( det );
    const dense::Mat< ScalarType, VecDim, VecDim > J_inv_transposed = J.inv_transposed( det );

    // Interpolate the scalar coefficient k at the quadrature point from the
    // wedge's nodal values.
    ScalarType k_eval = 0.0;
    for ( int k = 0; k < num_nodes_per_wedge; k++ )
    {
        k_eval += shape( k, quad_point ) * k_local_hex[wedge]( k );
    }

    // Symmetric gradients of every wedge shape function for the row (dimi)
    // and column (dimj) velocity components.
    for ( int k = 0; k < num_nodes_per_wedge; k++ )
    {
        sym_grad_i[k] = symmetric_grad( J_inv_transposed, quad_point, k, dimi );
        sym_grad_j[k] = symmetric_grad( J_inv_transposed, quad_point, k, dimj );
    }

    // Combined quadrature weight used by the caller's i/j accumulation.
    jdet_keval_quadweight = quad_weight * k_eval * abs_det;
}
1830
/**
 * @brief Assemble one wedge-local 18x18 matrix (slow path / on-demand assembly).
 *
 * Integrates the epsilon-div-div bilinear form
 * 2 k eps(u):eps(v) - (2/3) k div(u) div(v) over the wedge with the class
 * quadrature rule, looping over the 3x3 velocity-component blocks.
 *
 * NOTE(review): several lines of this function appear to have been dropped by
 * extraction (the signature's return-type/name line, the local declarations
 * of `wedge_phy_surf`, `k_local_hex`, the result matrix `A`, the
 * `sym_grad_i`/`sym_grad_j` buffers, and the line invoking
 * `assemble_trial_test_vecs(`). Compare against the original header before
 * editing — the code below is reproduced as extracted.
 */
KOKKOS_INLINE_FUNCTION
    const int local_subdomain_id,
    const int x_cell,
    const int y_cell,
    const int r_cell,
    const int wedge ) const
{
    wedge_surface_physical_coords( wedge_phy_surf, grid_, local_subdomain_id, x_cell, y_cell );

    // Inner and outer radius of this radial cell.
    const ScalarT r_1 = radii_( local_subdomain_id, r_cell );
    const ScalarT r_2 = radii_( local_subdomain_id, r_cell + 1 );

    // Gather the nodal coefficient values for both wedges of the hex cell.
    extract_local_wedge_scalar_coefficients( k_local_hex, local_subdomain_id, x_cell, y_cell, r_cell, k_ );

    // Loop over the 3x3 component blocks of the 18x18 local matrix.
    for ( int dimi = 0; dimi < 3; ++dimi )
    {
        for ( int dimj = 0; dimj < 3; ++dimj )
        {
            for ( int q = 0; q < num_quad_points; q++ )
            {
                ScalarType jdet_keval_quadweight = 0;

                // (extraction gap: call to assemble_trial_test_vecs)
                wedge,
                quad_points[q],
                quad_weights[q],
                r_1,
                r_2,
                wedge_phy_surf,
                k_local_hex,
                dimi,
                dimj,
                sym_grad_i,
                sym_grad_j,
                jdet_keval_quadweight );

                // Accumulate the quadrature contribution into the (dimi, dimj)
                // block: 2 eps_j : eps_i - (2/3) (div-part_j)(div-part_i).
                for ( int i = 0; i < num_nodes_per_wedge; i++ )
                {
                    for ( int j = 0; j < num_nodes_per_wedge; j++ )
                    {
                        A( i + dimi * num_nodes_per_wedge, j + dimj * num_nodes_per_wedge ) +=
                            jdet_keval_quadweight *
                            ( 2 * sym_grad_j[j].double_contract( sym_grad_i[i] ) -
                              2.0 / 3.0 * sym_grad_j[j]( dimj, dimj ) * sym_grad_i[i]( dimi, dimi ) );
                    }
                }
            }
        }
    }

    return A;
}
1893};
1894
1897
1898} // namespace terra::fe::wedge::operators::shell::epsdivdiv_history
Definition communication_plan.hpp:33
Send and receive buffers for all process-local subdomain boundaries.
Definition communication.hpp:56
Matrix-free / matrix-based epsilon-div-div operator on wedge elements in a spherical shell.
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:58
void set_stored_matrix_mode(linalg::OperatorStoredMatrixMode operator_stored_matrix_mode, int level_range, grid::Grid4DDataScalar< ScalarType > GCAElements)
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:255
size_t team_shmem_size_dn(const int) const
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:479
bool has_flag(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, grid::shell::ShellBoundaryFlag flag) const
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:245
const grid::shell::DistributedDomain & get_domain() const
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:240
size_t team_shmem_size(const int ts) const
Team scratch memory size for fast paths.
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:465
dense::Mat< ScalarT, LocalMatrixDim, LocalMatrixDim > assemble_local_matrix(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int wedge) const
Assemble one wedge-local 18x18 matrix (slow path / on-demand assembly).
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:1835
ScalarT ScalarType
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:62
grid::Grid3DDataVec< ScalarT, 3 > get_grid()
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:242
void set_boundary_conditions(BoundaryConditions bcs)
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:230
void operator()(const Team &team) const
Legacy generic team operator.
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:1761
linalg::OperatorStoredMatrixMode get_stored_matrix_mode()
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:278
static constexpr int LocalMatrixDim
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:63
const grid::Grid4DDataScalar< ScalarType > & k_grid_data()
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:239
void set_operator_apply_and_communication_modes(const linalg::OperatorApplyMode operator_apply_mode, const linalg::OperatorCommunicationMode operator_communication_mode)
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:220
terra::grid::Grid4DDataMatrices< ScalarType, LocalMatrixDim, LocalMatrixDim, 2 > Grid4DDataLocalMatrices
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:64
void set_diagonal(bool v)
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:228
dense::Mat< ScalarT, LocalMatrixDim, LocalMatrixDim > get_local_matrix(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int wedge) const
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:294
void column_grad_to_sym(const int dim, const double g0, const double g1, const double g2, double &E00, double &E11, double &E22, double &sym01, double &sym02, double &sym12, double &gdd) const
Convert one gradient column into symmetric-gradient and div contributions.
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:420
void set_local_matrix(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int wedge, const dense::Mat< ScalarT, LocalMatrixDim, LocalMatrixDim > &mat) const
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:281
Kokkos::TeamPolicy<>::member_type Team
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:66
EpsilonDivDivKerngenV09SeparateScatter(const grid::shell::DistributedDomain &domain, const grid::Grid3DDataVec< ScalarT, 3 > &grid, const grid::Grid2DDataScalar< ScalarT > &radii, const grid::Grid4DDataScalar< grid::shell::ShellBoundaryFlag > &mask, const grid::Grid4DDataScalar< ScalarT > &k, BoundaryConditions bcs, bool diagonal, linalg::OperatorApplyMode operator_apply_mode=linalg::OperatorApplyMode::Replace, linalg::OperatorCommunicationMode operator_communication_mode=linalg::OperatorCommunicationMode::CommunicateAdditively, linalg::OperatorStoredMatrixMode operator_stored_matrix_mode=linalg::OperatorStoredMatrixMode::Off)
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:157
grid::Grid2DDataScalar< ScalarT > get_radii() const
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:241
void apply_impl(const SrcVectorType &src, DstVectorType &dst)
Apply the operator: dst <- Op(src) (or additive, depending on mode).
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:325
void assemble_trial_test_vecs(const int wedge, const dense::Vec< ScalarType, VecDim > &quad_point, const ScalarType quad_weight, const ScalarT r_1, const ScalarT r_2, dense::Vec< ScalarT, 3 >(*wedge_phy_surf)[3], const dense::Vec< ScalarT, 6 > *k_local_hex, const int dimi, const int dimj, dense::Mat< ScalarType, VecDim, VecDim > *sym_grad_i, dense::Mat< ScalarType, VecDim, VecDim > *sym_grad_j, ScalarType &jdet_keval_quadweight) const
Definition epsilon_divdiv_kerngen_v09_separate_scatter.hpp:1797
Parallel data structure organizing the thick spherical shell metadata for distributed (MPI parallel) ...
Definition spherical_shell.hpp:2518
const std::map< SubdomainInfo, std::tuple< LocalSubdomainIdx, SubdomainNeighborhood > > & subdomains() const
Definition spherical_shell.hpp:2650
const DomainInfo & domain_info() const
Returns a const reference.
Definition spherical_shell.hpp:2647
Information about the thick spherical shell mesh.
Definition spherical_shell.hpp:780
const std::vector< double > & radii() const
Definition spherical_shell.hpp:845
int diamond_lateral_refinement_level() const
Definition spherical_shell.hpp:843
int subdomain_num_nodes_radially() const
Equivalent to calling subdomain_num_nodes_radially( subdomain_refinement_level() )
Definition spherical_shell.hpp:861
int subdomain_num_nodes_per_side_laterally() const
Equivalent to calling subdomain_num_nodes_per_side_laterally( subdomain_refinement_level() )
Definition spherical_shell.hpp:852
Static assertion: VectorQ1Scalar satisfies VectorLike concept.
Definition vector_q1.hpp:168
const grid::Grid4DDataVec< ScalarType, VecDim > & grid_data() const
Get const reference to grid data.
Definition vector_q1.hpp:288
bool has_matrix(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int wedge) const
Checks for presence of a local matrix for a certain element.
Definition local_matrix_storage.hpp:223
dense::Mat< ScalarT, LocalMatrixDim, LocalMatrixDim > get_matrix(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int wedge) const
Retrieves the local matrix. If local matrices are stored, the desired local matrix is loaded and r...
Definition local_matrix_storage.hpp:175
void set_matrix(const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int wedge, dense::Mat< ScalarT, LocalMatrixDim, LocalMatrixDim > mat) const
Set the local matrix stored in the operator.
Definition local_matrix_storage.hpp:118
Timer supporting RAII scope or manual stop.
Definition timer.hpp:342
void stop()
Stop the timer and record elapsed time.
Definition timer.hpp:364
Concept for types that can be used as Galerkin coarse-grid operators in a multigrid hierarchy....
Definition operator.hpp:81
k_eval
Definition EpsilonDivDiv_kernel_gen.py:175
r_cell
Definition EpsilonDivDiv_kernel_gen.py:23
g2
Definition EpsilonDivDiv_kernel_gen.py:267
div_u
Definition EpsilonDivDiv_kernel_gen.py:359
cmb_shift
Definition EpsilonDivDiv_kernel_gen.py:98
g1
Definition EpsilonDivDiv_kernel_gen.py:266
J_det
Definition EpsilonDivDiv_kernel_gen.py:216
surface_shift
Definition EpsilonDivDiv_kernel_gen.py:98
constexpr int idx(const int loop_idx, const int size, const grid::BoundaryPosition position, const grid::BoundaryDirection direction)
Definition buffer_copy_kernels.hpp:73
void send_recv_with_plan(const ShellBoundaryCommPlan< GridDataType > &plan, const GridDataType &data, SubdomainNeighborhoodSendRecvBuffer< typename GridDataType::value_type, grid::grid_data_vec_dim< GridDataType >() > &recv_buffers, CommunicationReduction reduction=CommunicationReduction::SUM)
Definition communication_plan.hpp:652
Definition epsilon_divdiv_kerngen_v01_initial.hpp:16
constexpr void quad_felippa_1x1_quad_points(dense::Vec< T, 3 >(&quad_points)[quad_felippa_1x1_num_quad_points])
Definition wedge/quadrature/quadrature.hpp:36
constexpr void quad_felippa_1x1_quad_weights(T(&quad_weights)[quad_felippa_1x1_num_quad_points])
Definition wedge/quadrature/quadrature.hpp:43
constexpr int quad_felippa_1x1_num_quad_points
Definition wedge/quadrature/quadrature.hpp:32
constexpr int num_nodes_per_wedge_surface
Definition kernel_helpers.hpp:6
constexpr dense::Mat< T, 3, 3 > symmetric_grad(const dense::Mat< T, 3, 3 > &J_inv_transposed, const dense::Vec< T, 3 > &quad_point, const int dof, const int dim)
Returns the symmetric gradient of the shape function of a dof at a quadrature point.
Definition integrands.hpp:685
void wedge_surface_physical_coords(dense::Vec< T, 3 >(&wedge_surf_phy_coords)[num_wedges_per_hex_cell][num_nodes_per_wedge_surface], const grid::Grid3DDataVec< T, 3 > &lateral_grid, const int local_subdomain_id, const int x_cell, const int y_cell)
Extracts the (unit sphere) surface vertex coords of the two wedges of a hex cell.
Definition kernel_helpers.hpp:26
constexpr void reorder_local_dofs(const DoFOrdering doo_from, const DoFOrdering doo_to, dense::Vec< ScalarT, 18 > &dofs)
Definition kernel_helpers.hpp:619
void atomically_add_local_wedge_vector_coefficients(const grid::Grid4DDataVec< T, VecDim > &global_coefficients, const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int d, const dense::Vec< T, 6 > local_coefficients[2])
Performs an atomic add of the two local wedge coefficient vectors of a hex cell into the global coeff...
Definition kernel_helpers.hpp:465
constexpr int num_wedges_per_hex_cell
Definition kernel_helpers.hpp:5
void extract_local_wedge_scalar_coefficients(dense::Vec< T, 6 >(&local_coefficients)[2], const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const grid::Grid4DDataScalar< T > &global_coefficients)
Extracts the local scalar coefficients for the two wedges of a hex cell from the global coefficient v...
Definition kernel_helpers.hpp:306
void extract_local_wedge_vector_coefficients(dense::Vec< T, 6 >(&local_coefficients)[2], const int local_subdomain_id, const int x_cell, const int y_cell, const int r_cell, const int d, const grid::Grid4DDataVec< T, VecDim > &global_coefficients)
Extracts the local vector coefficients for the two wedges of a hex cell from the global coefficient v...
Definition kernel_helpers.hpp:356
constexpr int num_nodes_per_wedge
Definition kernel_helpers.hpp:7
constexpr T shape(const int node_idx, const T xi, const T eta, const T zeta)
(Tensor-product) Shape function.
Definition integrands.hpp:146
constexpr dense::Mat< T, 3, 3 > jac(const dense::Vec< T, 3 > &p1_phy, const dense::Vec< T, 3 > &p2_phy, const dense::Vec< T, 3 > &p3_phy, const T r_1, const T r_2, const T xi, const T eta, const T zeta)
Definition integrands.hpp:657
dense::Vec< typename CoordsShellType::value_type, 3 > coords(const int subdomain, const int x, const int y, const int r, const CoordsShellType &coords_shell, const CoordsRadiiType &coords_radii)
Definition spherical_shell.hpp:2871
BoundaryConditionMapping[2] BoundaryConditions
Definition shell/bit_masks.hpp:37
ShellBoundaryFlag
FlagLike that indicates boundary types for the thick spherical shell.
Definition shell/bit_masks.hpp:12
BoundaryConditionFlag get_boundary_condition_flag(const BoundaryConditions bcs, ShellBoundaryFlag sbf)
Retrieve the boundary condition flag that is associated with a location in the shell e....
Definition shell/bit_masks.hpp:42
BoundaryConditionFlag
FlagLike that indicates the type of boundary condition
Definition shell/bit_masks.hpp:25
Kokkos::View< dense::Mat< ScalarType, Rows, Cols > ****[NumMatrices], Layout > Grid4DDataMatrices
Definition grid_types.hpp:173
Kokkos::View< ScalarType ***[VecDim], Layout > Grid3DDataVec
Definition grid_types.hpp:42
Kokkos::View< ScalarType ****, Layout > Grid4DDataScalar
Definition grid_types.hpp:27
Kokkos::View< ScalarType **, Layout > Grid2DDataScalar
Definition grid_types.hpp:21
dense::Mat< ScalarType, 3, 3 > trafo_mat_cartesian_to_normal_tangential(const dense::Vec< ScalarType, 3 > &n_input)
Constructs a robust orthonormal transformation matrix from Cartesian to (normal–tangential–tangential...
Definition local_basis_trafo_normal_tangential.hpp:36
ScalarOf< Vector > dot(const Vector &y, const Vector &x)
Compute the dot product of two vectors. Implements: .
Definition vector.hpp:118
OperatorApplyMode
Modes for applying an operator to a vector.
Definition operator.hpp:30
@ Replace
Overwrite the destination vector.
OperatorStoredMatrixMode
Modes for applying stored matrices.
Definition operator.hpp:47
@ Selective
Use stored matrices on selected, marked elements only, assemble on all others.
@ Full
Use stored matrices on all elements.
@ Off
Do not use stored matrices.
OperatorCommunicationMode
Modes for communication during operator application.
Definition operator.hpp:40
@ CommunicateAdditively
Communicate and add results.
constexpr bool has_flag(E mask_value, E flag) noexcept
Checks if a bitmask value contains a specific flag.
Definition bit_masking.hpp:43
detail::PrefixCout logroot([]() { return detail::log_prefix();})
std::ostream subclass that just logs on root and adds a timestamp for each line.
Definition mat.hpp:10
constexpr Mat diagonal() const
Definition mat.hpp:377
constexpr Mat< T, Cols, Rows > transposed() const
Definition mat.hpp:187
Mat & hadamard_product(const Mat &mat)
Definition mat.hpp:213
T double_contract(const Mat &mat)
Definition mat.hpp:226
SoA (Structure-of-Arrays) 4D vector grid data.
Definition grid_types.hpp:51