GitHub Repository: trixi-framework/Trixi.jl
Path: blob/main/src/callbacks_step/analysis_dg3d_parallel.jl
# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
# Since these FMAs can increase the performance of many numerical algorithms,
# we need to opt-in explicitly.
# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
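# For example, `@muladd` (provided by MuladdMacro.jl) rewrites expressions
# such as `a * b + c` into `muladd(a, b, c)`, which lets LLVM emit a fused
# multiply-add instruction where the hardware supports it, e.g.:
#
#   @muladd f(a, b, c) = a * b + c   # compiles like f(a, b, c) = muladd(a, b, c)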
@muladd begin
#! format: noindent

function calc_error_norms(func, u, t, analyzer,
                          mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}},
                          equations,
                          initial_condition, dg::DGSEM, cache, cache_analysis)
    @unpack vandermonde, weights = analyzer
    @unpack node_coordinates, inverse_jacobian = cache.elements
    @unpack u_local, u_tmp1, u_tmp2, x_local, x_tmp1, x_tmp2, jacobian_local, jacobian_tmp1, jacobian_tmp2 = cache_analysis

    # Set up data structures
    l2_error = zero(func(get_node_vars(u, equations, dg, 1, 1, 1, 1), equations))
    linf_error = copy(l2_error)
    volume = zero(real(mesh))

    # Iterate over all elements for error calculations
    for element in eachelement(dg, cache)
        # Interpolate solution and node locations to analysis nodes
        multiply_dimensionwise!(u_local, vandermonde, view(u, :, :, :, :, element),
                                u_tmp1, u_tmp2)
        multiply_dimensionwise!(x_local, vandermonde,
                                view(node_coordinates, :, :, :, :, element), x_tmp1,
                                x_tmp2)
        multiply_scalar_dimensionwise!(jacobian_local, vandermonde,
                                       inv.(view(inverse_jacobian, :, :, :, element)),
                                       jacobian_tmp1, jacobian_tmp2)

        # Calculate errors at each analysis node
        for k in eachnode(analyzer), j in eachnode(analyzer), i in eachnode(analyzer)
            u_exact = initial_condition(get_node_coords(x_local, equations, dg, i, j,
                                                        k), t, equations)
            diff = func(u_exact, equations) -
                   func(get_node_vars(u_local, equations, dg, i, j, k), equations)
            # We take the absolute value since we need the Jacobian here for the volume calculation
            abs_jacobian_local_ijk = abs(jacobian_local[i, j, k])

            l2_error += diff .^ 2 *
                        (weights[i] * weights[j] * weights[k] * abs_jacobian_local_ijk)
            linf_error = @. max(linf_error, abs(diff))
            volume += weights[i] * weights[j] * weights[k] * abs_jacobian_local_ijk
        end
    end
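
    # At this point, `l2_error` holds this rank's local contribution to the
    # quadrature sum
    #   sum_elements sum_{i,j,k} w_i * w_j * w_k * |J_{ijk}| * diff_{ijk}^2
    # and `volume` the corresponding local volume; the global reduction and
    # the normalization by the total volume follow below.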

    # Accumulate local results on the root process
    global_l2_error = Vector(l2_error)
    global_linf_error = Vector(linf_error)
    MPI.Reduce!(global_l2_error, +, mpi_root(), mpi_comm())
    # `Base.max` instead of `max` is needed, see the comment in src/auxiliary/math.jl
    MPI.Reduce!(global_linf_error, Base.max, mpi_root(), mpi_comm())
    total_volume = MPI.Reduce(volume, +, mpi_root(), mpi_comm())
    if mpi_isroot()
        l2_error = convert(typeof(l2_error), global_l2_error)
        linf_error = convert(typeof(linf_error), global_linf_error)
        # For the L2 error, divide by the total volume
        l2_error = @. sqrt(l2_error / total_volume)
    else
        l2_error = convert(typeof(l2_error), NaN * global_l2_error)
        linf_error = convert(typeof(linf_error), NaN * global_linf_error)
    end

    return l2_error, linf_error
end
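
# A minimal, self-contained sketch of the reduction pattern used above
# (illustrative only, not part of Trixi.jl): every rank contributes its
# partial sums in place, and only the root ends up with the global result.
#
#   using MPI
#   MPI.Init()
#   comm = MPI.COMM_WORLD
#   partial = [1.0, 2.0]              # per-rank partial sums, e.g., of diff^2
#   MPI.Reduce!(partial, +, 0, comm)  # in-place sum-reduce onto rank 0
#   if MPI.Comm_rank(comm) == 0
#       @show sqrt.(partial)          # finalize (e.g., take sqrt) on root only
#   end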

function integrate_via_indices(func::Func, u,
                               mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}},
                               equations,
                               dg::DGSEM, cache, args...; normalize = true) where {Func}
    @unpack weights = dg.basis

    # Initialize the integral with zeros of the right shape.
    # Pass `zeros(eltype(u), nvariables(equations), nnodes(dg), nnodes(dg), nnodes(dg), 1)`
    # to `func` since `u` might be empty if the current rank has no elements.
    # See also https://github.com/trixi-framework/Trixi.jl/issues/1096, and
    # https://github.com/trixi-framework/Trixi.jl/pull/2126/files/7cbc57cfcba93e67353566e10fce1f3edac27330#r1814483243.
    integral = zero(func(zeros(eltype(u), nvariables(equations), nnodes(dg), nnodes(dg),
                               nnodes(dg), 1), 1, 1, 1, 1, equations, dg, args...))
    volume = zero(real(mesh))

    # Use quadrature to numerically integrate over the entire domain
    @batch reduction=((+, integral), (+, volume)) for element in eachelement(dg, cache)
        for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
            volume_jacobian = abs(inv(cache.elements.inverse_jacobian[i, j, k, element]))
            integral += volume_jacobian * weights[i] * weights[j] * weights[k] *
                        func(u, i, j, k, element, equations, dg, args...)
            volume += volume_jacobian * weights[i] * weights[j] * weights[k]
        end
    end
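
    # Note: `@batch reduction=...` comes from Polyester.jl; it threads the
    # element loop and sums the per-thread `integral` and `volume`
    # accumulators into the variables used below.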

    global_integral = MPI.Reduce!(Ref(integral), +, mpi_root(), mpi_comm())
    total_volume = MPI.Reduce(volume, +, mpi_root(), mpi_comm())
    if mpi_isroot()
        integral = convert(typeof(integral), global_integral[])
    else
        integral = convert(typeof(integral), NaN * integral)
        total_volume = volume # non-root processes receive nothing from the reduce -> overwrite
    end

    # Normalize with the total volume
    if normalize
        integral = integral / total_volume
    end

    return integral
end
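
# Hypothetical usage sketch (the integrand below is illustrative, not from
# this file): compute the volume average of the first conservative variable.
# With `normalize = true` (the default), the quadrature sum is divided by the
# total volume computed above.
#
#   avg_u1 = integrate_via_indices(u, mesh, equations, dg, cache) do u_, i, j, k, element, eqs, solver
#       u_[1, i, j, k, element]
#   end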
end # @muladd