Add logical_divergence function
nilsvu committed Aug 24, 2024
1 parent 9639933 commit 99e3b5e
Showing 4 changed files with 113 additions and 2 deletions.
46 changes: 46 additions & 0 deletions src/NumericalAlgorithms/LinearOperators/Divergence.cpp
@@ -6,6 +6,7 @@
#include <array>
#include <cstddef>

#include "DataStructures/ApplyMatrices.hpp"
#include "DataStructures/ComplexDataVector.hpp"
#include "DataStructures/DataBox/Tag.hpp"
#include "DataStructures/DataVector.hpp"
@@ -61,6 +62,26 @@ void divergence(const gsl::not_null<Scalar<DataType>*> div_input,
}
}

template <typename ResultTensor, typename FluxTensor, size_t Dim>
void logical_divergence(const gsl::not_null<ResultTensor*> div_flux,
const FluxTensor& flux, const Mesh<Dim>& mesh) {
// Note: This function hasn't been optimized much at all. Feel free to
// optimize if needed!
static const Matrix identity_matrix{};
for (size_t d = 0; d < Dim; ++d) {
auto matrices = make_array<Dim>(std::cref(identity_matrix));
gsl::at(matrices, d) =
Spectral::differentiation_matrix(mesh.slice_through(d));
for (size_t storage_index = 0; storage_index < div_flux->size();
++storage_index) {
const auto div_flux_index = div_flux->get_tensor_index(storage_index);
const auto flux_index = prepend(div_flux_index, d);
div_flux->get(div_flux_index) +=
apply_matrices(matrices, flux.get(flux_index), mesh.extents());
}
}
}
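
For context, a minimal sketch of how this overload might be called (not part of the commit; the mesh extents, basis, and quadrature are illustrative assumptions). Since the loop accumulates with `+=`, the caller must zero-initialize the result tensor:

// Sketch only: mesh parameters are assumed for illustration.
const Mesh<2> mesh{4, Spectral::Basis::Legendre,
                   Spectral::Quadrature::GaussLobatto};
// The flux needs an upper element-logical first index.
const tnsr::I<DataVector, 2, Frame::ElementLogical> flux{
    mesh.number_of_grid_points(), 1.};
// Zero-initialize: this overload accumulates into the result.
Scalar<DataVector> div_flux{mesh.number_of_grid_points(), 0.};
logical_divergence(make_not_null(&div_flux), flux, mesh);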

#define DTYPE(data) BOOST_PP_TUPLE_ELEM(0, data)
#define DIM(data) BOOST_PP_TUPLE_ELEM(1, data)
#define FRAME(data) BOOST_PP_TUPLE_ELEM(2, data)
@@ -85,3 +106,28 @@ GENERATE_INSTANTIATIONS(INSTANTIATE, (DataVector, ComplexDataVector), (1, 2, 3),
#undef DIM
#undef FRAME
#undef INSTANTIATE

#define DTYPE(data) BOOST_PP_TUPLE_ELEM(0, data)
#define DIM(data) BOOST_PP_TUPLE_ELEM(1, data)
#define TENSOR(data) BOOST_PP_TUPLE_ELEM(2, data)

#define INSTANTIATION_SCALAR(r, data) \
template void logical_divergence( \
const gsl::not_null<Scalar<DTYPE(data)>*> div_flux, \
const tnsr::I<DTYPE(data), DIM(data), Frame::ElementLogical>& flux, \
const Mesh<DIM(data)>& mesh);
#define INSTANTIATION_TENSOR(r, data) \
template void logical_divergence( \
const gsl::not_null<tnsr::TENSOR(data) < DTYPE(data), DIM(data), \
Frame::Inertial>* > div_flux, \
const TensorMetafunctions::prepend_spatial_index< \
tnsr::TENSOR(data) < DTYPE(data), DIM(data), Frame::Inertial>, \
DIM(data), UpLo::Up, Frame::ElementLogical > &flux, \
const Mesh<DIM(data)>& mesh);

GENERATE_INSTANTIATIONS(INSTANTIATION_SCALAR, (DataVector, ComplexDataVector),
(1, 2, 3))
GENERATE_INSTANTIATIONS(INSTANTIATION_TENSOR, (DataVector, ComplexDataVector),
(1, 2, 3), (i, I))

#undef DTYPE
#undef DIM
#undef TENSOR
#undef INSTANTIATION_SCALAR
#undef INSTANTIATION_TENSOR
23 changes: 23 additions & 0 deletions src/NumericalAlgorithms/LinearOperators/Divergence.hpp
@@ -89,6 +89,29 @@ void divergence(gsl::not_null<Scalar<DataType>*> div_input,
DerivativeFrame>& inverse_jacobian);
/// @}

/// @{
/*!
* \brief Compute the divergence of fluxes in logical coordinates
*
* Applies the logical differentiation matrix to the flux components along
* each logical dimension and sums the contributions.
*
* \see divergence
*/
template <typename ResultTensor, typename FluxTensor, size_t Dim>
void logical_divergence(gsl::not_null<ResultTensor*> div_flux,
const FluxTensor& flux, const Mesh<Dim>& mesh);

template <typename FluxTags, size_t Dim>
auto logical_divergence(const Variables<FluxTags>& flux, const Mesh<Dim>& mesh)
-> Variables<db::wrap_tags_in<Tags::div, FluxTags>>;

template <typename... DivTags, typename... FluxTags, size_t Dim>
void logical_divergence(
gsl::not_null<Variables<tmpl::list<DivTags...>>*> div_flux,
const Variables<tmpl::list<FluxTags...>>& flux, const Mesh<Dim>& mesh);
/// @}
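
Written out (notation mine, matching the loop in Divergence.cpp), each component of the result is

\begin{equation}
  (\mathrm{div}\,F)_{j\ldots} = \sum_{\hat{\imath}=1}^{\mathrm{Dim}}
    D_{(\hat{\imath})}\, F^{\hat{\imath}}{}_{j\ldots},
\end{equation}

where $D_{(\hat{\imath})}$ is the one-dimensional spectral differentiation matrix applied along logical dimension $\hat{\imath}$, with the identity acting in all other dimensions; this is what `apply_matrices` evaluates in the loop over dimensions. No Jacobian factors are applied, so the result is a derivative in element-logical coordinates, in contrast to `divergence`, which takes an inverse Jacobian.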

namespace Tags {
/*!
* \ingroup DataBoxTagsGroup
33 changes: 33 additions & 0 deletions src/NumericalAlgorithms/LinearOperators/Divergence.tpp
@@ -96,3 +96,36 @@ void divergence(
};
EXPAND_PACK_LEFT_TO_RIGHT(apply_div(FluxTags{}, DivTags{}));
}

template <typename FluxTags, size_t Dim>
Variables<db::wrap_tags_in<Tags::div, FluxTags>> logical_divergence(
const Variables<FluxTags>& flux, const Mesh<Dim>& mesh) {
Variables<db::wrap_tags_in<Tags::div, FluxTags>> div_flux(
flux.number_of_grid_points());
logical_divergence(make_not_null(&div_flux), flux, mesh);
return div_flux;
}

template <typename... ResultTags, typename... FluxTags, size_t Dim>
void logical_divergence(
const gsl::not_null<Variables<tmpl::list<ResultTags...>>*> div_flux,
const Variables<tmpl::list<FluxTags...>>& flux, const Mesh<Dim>& mesh) {
static_assert(
(... and
std::is_same_v<tmpl::front<typename FluxTags::type::index_list>,
SpatialIndex<Dim, UpLo::Up, Frame::ElementLogical>>),
"The first index of each flux must be an upper spatial index in "
"element-logical coordinates.");
static_assert(
(... and
std::is_same_v<
typename ResultTags::type,
TensorMetafunctions::remove_first_index<typename FluxTags::type>>),
"The result tensors must have the same type as the flux tensors with "
"their first index removed.");
div_flux->initialize(mesh.number_of_grid_points(), 0.);
// Note: This function hasn't been optimized much at all. Feel free to
// optimize if needed!
EXPAND_PACK_LEFT_TO_RIGHT(logical_divergence(
make_not_null(&get<ResultTags>(*div_flux)), get<FluxTags>(flux), mesh));
}
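
A usage sketch of the Variables overloads (the tag and its type are hypothetical, introduced only for illustration):

// Hypothetical tag, for illustration only.
struct MassFlux : db::SimpleTag {
  using type = tnsr::I<DataVector, 3, Frame::ElementLogical>;
};

const Mesh<3> mesh{4, Spectral::Basis::Legendre,
                   Spectral::Quadrature::GaussLobatto};
Variables<tmpl::list<MassFlux>> fluxes{mesh.number_of_grid_points(), 0.};
// ... fill `fluxes` ...
// Each tag is wrapped in Tags::div and loses its first (logical) index,
// so Tags::div<MassFlux> holds a Scalar<DataVector> here.
const auto div_fluxes = logical_divergence(fluxes, mesh);
const auto& div_mass_flux = get<Tags::div<MassFlux>>(div_fluxes);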
13 changes: 11 additions & 2 deletions tests/Unit/NumericalAlgorithms/LinearOperators/Test_Divergence.cpp
@@ -17,8 +17,10 @@
#include "DataStructures/DataBox/Prefixes.hpp"
#include "DataStructures/DataBox/Tag.hpp"
#include "DataStructures/DataVector.hpp"
#include "DataStructures/Tensor/EagerMath/Determinant.hpp"
#include "DataStructures/Tensor/Tensor.hpp"
#include "DataStructures/Variables.hpp"
#include "DataStructures/Variables/FrameTransform.hpp"
#include "DataStructures/VariablesTag.hpp"
#include "Domain/CoordinateMaps/Affine.hpp"
#include "Domain/CoordinateMaps/CoordinateMap.hpp"
@@ -143,6 +145,7 @@ void test_divergence_impl(
const auto xi = logical_coordinates(mesh);
const auto x = coordinate_map(xi);
const auto inv_jacobian = coordinate_map.inv_jacobian(xi);
const auto det_jacobian = determinant(coordinate_map.jacobian(xi));
MathFunctions::TensorProduct<Dim> f(1.0, std::move(functions));
using flux_tags = two_fluxes<DataType, Dim, Frame>;
Variables<flux_tags> fluxes(num_grid_points);
@@ -154,7 +157,7 @@
using DivFluxTag = Tags::div<FluxTag>;
get<DivFluxTag>(expected_div_fluxes) = FluxTag::divergence_of_flux(f, x);
});
const auto div_fluxes = divergence(fluxes, mesh, inv_jacobian);
auto div_fluxes = divergence(fluxes, mesh, inv_jacobian);
CHECK(div_fluxes.size() == expected_div_fluxes.size());
CHECK(Dim * div_fluxes.size() == fluxes.size());
Approx local_approx = Approx::custom().epsilon(1.e-11).scale(1.);
@@ -166,6 +169,12 @@
const auto& expected =
get<Tags::div<Flux1<DataType, Dim, Frame>>>(div_fluxes);
CHECK_ITERABLE_CUSTOM_APPROX(expected, div_vector, local_approx);

// Test logical divergence
auto logical_fluxes =
transform::first_index_to_different_frame(fluxes, inv_jacobian);
logical_divergence(make_not_null(&div_fluxes), logical_fluxes, mesh);
CHECK_VARIABLES_CUSTOM_APPROX(div_fluxes, expected_div_fluxes, local_approx);
}
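
For reference, the identity this check relies on (notation mine; it assumes the coordinate maps in this test are affine, as the Affine coordinate-map include above suggests, so the Jacobians are constant):

\begin{equation}
  F^{\hat{\imath}} = \frac{\partial \xi^{\hat{\imath}}}{\partial x^i}\, F^i
  \quad\Longrightarrow\quad
  \partial_{\hat{\imath}} F^{\hat{\imath}} = \partial_i F^i,
\end{equation}

so the logical divergence of the frame-transformed fluxes can be compared directly against the inertial-frame divergence. For curved maps the two would differ by terms involving derivatives of the Jacobian.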

template <typename DataType>
@@ -314,7 +323,7 @@ void test_divergence_compute() {
}
} // namespace

// [[Timeout, 10]]
// [[Timeout, 20]]
SPECTRE_TEST_CASE("Unit.Numerical.LinearOperators.Divergence",
"[NumericalAlgorithms][LinearOperators][Unit]") {
test_divergence<DataVector>();
