Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
98 changes: 96 additions & 2 deletions src/pem.jl
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ represented by the vector of elements d.

Parameters
----------
- d :: Vector
- d :: Vector{<:Real}
Distribution under interest
- N :: Integer
Number of desired estimate points
Expand All @@ -88,7 +88,7 @@ Returns

"""
function pem(
d::Vector,
d::Vector{<:Real},
N::Integer;
Comment on lines 90 to 92
Copy link

Copilot AI Apr 20, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The signature was narrowed to d::Vector{<:Real}, but the docstring above still documents the parameter as d :: Vector (generic). Please update the docstring to reflect the new accepted type (and consider AbstractVector{<:Real} if you want to keep supporting views/subarrays while still avoiding the dispatch ambiguity).

Copilot uses AI. Check for mistakes.
mean_fun::Function=Distributions.mean,
central_moment_fun::Function=Distributions.moment,
Expand Down Expand Up @@ -202,5 +202,99 @@ function pem(
x = ϵ .+ mean_value

# results = NamedTuple{(:x, :p)}.(zip(x, p))
return (x=x, p=p)
end


"""
pem(distributions, N; mean_fun=mean, central_moment_fun=moment, optimizer=HiGHS.Optimizer)

Point Estimate Method to identify estimate points for K independent univariate distributions.

For K independent distributions each with N concentrations, the multivariate representation
uses K*N total concentration points. Each concentration point for variable k at concentration n
has all other variables set to their means:

x_{k,n} = (μ_1, ..., μ_{k-1}, x_{k,n}, μ_{k+1}, ..., μ_K)

with weight p_{k,n} / K, where p_{k,n} is the univariate weight for distribution k at
concentration n. This ensures all weights sum to 1.

This function implements the multivariate extension described in:
- H.P. Hong, An efficient point estimate method for probabilistic analysis,
Reliability Engineering and System Safety, 1998, https://doi.org/10.1016/S0951-8320(97)00071-9

Parameters
----------
- distributions :: Vector{<:UnivariateDistribution}
Vector of K independent univariate distributions
- N :: Union{Integer, Vector{<:Integer}}
Number of desired estimate points per distribution. If an Integer is provided,
the same N is used for all distributions. If a Vector is provided, N[k] is
used for the k-th distribution.
- mean_fun :: Function (optional)
Function used to calculate the mean value of each distribution
- central_moment_fun :: Function (optional)
Function used to calculate the central moment of each distribution
- montecarlo_sampling :: Integer (optional, default 1000000)
Number of Monte Carlo samples used if a non-specific moment function is available
- optimizer (optional)
JuMP optimizer for executing the optimization

Returns
-------
- (x, p) :: NamedTuple
- x :: Matrix{Float64}
Matrix of shape (K, M) where M = sum(N_1, ..., N_K). Each column is one
concentration point: only one variable deviates from its mean.
- p :: Vector{Float64}
Weight of each concentration point (p_{k,n} / K).

"""
function pem(
distributions::Vector{<:UnivariateDistribution},
N::Union{Integer, Vector{<:Integer}};
mean_fun::Function=Distributions.mean,
central_moment_fun::Function=Distributions.moment,
montecarlo_sampling::Integer=1000000,
optimizer=DEFAULT_SOLVER,
)

K = length(distributions)
Ns = N isa Integer ? fill(N, K) : N

@assert length(Ns) == K "Length of N vector must match the number of distributions (got $(length(Ns)), expected $K)"

# Apply univariate pem to each distribution independently
results = [
pem(distributions[k], Ns[k];
mean_fun=mean_fun,
central_moment_fun=central_moment_fun,
montecarlo_sampling=montecarlo_sampling,
optimizer=optimizer)
for k in 1:K
]

# Mean of each distribution
μ = [mean_fun(distributions[k]) for k in 1:K]

# Build K*N concentration points following Hong (1998):
# For variable k at concentration n, all other variables stay at their mean.
M = sum(Ns)
x = Matrix{Float64}(undef, K, M)
p = Vector{Float64}(undef, M)

j = 1
for k in 1:K
for n in 1:Ns[k]
# All variables at their mean, except variable k
x[:, j] .= μ
x[k, j] = results[k].x[n]
# Weight: p_{k,n} / K
p[j] = results[k].p[n] / K
j += 1
end
end

return (x=x, p=p)
end
21 changes: 21 additions & 0 deletions test/Examples.jl
Original file line number Diff line number Diff line change
Expand Up @@ -15,4 +15,25 @@ module Examples
Example("Normal_truncated_9", truncated(Normal(1.0, 0.4), 0.0, +Inf), 9),
]

"""
Container describing one multivariate PEM test case: a named set of
independent univariate distributions and the number of estimate points.
"""
struct MultivariateExample
    name::String                # test identifier (also used as the YAML reference file name)
    d::Vector                   # vector of independent univariate distributions under test
    N::Union{Int, Vector{Int}}  # estimate points: one shared count, or one count per distribution
end

# Multivariate test cases: homogeneous and mixed distribution pairs, with a
# shared N or a per-distribution N vector. `const` so the module-level global
# is concretely typed and the binding cannot be accidentally reassigned.
const list_multivariate_examples = [
    MultivariateExample("Normal2_N2", [Normal(), Normal()], 2),
    MultivariateExample("Normal2_N3", [Normal(), Normal()], 3),
    MultivariateExample(
        "Normal_TruncNormal_N2",
        [Normal(), truncated(Normal(1.0, 0.4), 0.0, +Inf)],
        2,
    ),
    MultivariateExample(
        "Normal_TruncNormal_mixed_N",
        [Normal(), truncated(Normal(1.0, 0.4), 0.0, +Inf)],
        [2, 3],
    ),
]

end
47 changes: 47 additions & 0 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -50,12 +50,59 @@ function test_example(example_name, testing_function, args...)

end

"Function to test multivariate examples"
function test_multivariate_example(example_name, testing_function, args...)

# for reproducibility
Distributions.Random.seed!(SEED)

# calculate simulations
calc_solution = testing_function(args...)

path_solution = joinpath(BASE_FOLDER, "test", "testcases", string(testing_function) * "_multivariate", example_name * ".yml")

# Sort columns lexicographically for deterministic comparison
I_calc = sortperm(eachcol(calc_solution.x), by=col -> Tuple(col))

x_sorted = calc_solution.x[:, I_calc]
p_sorted = calc_solution.p[I_calc]

if isfile(path_solution)
# if the file exists run tests
proven_solution = YAML.load_file(path_solution)

proven_x = Matrix(hcat([Float64.(row) for row in proven_solution["x"]]...)')
proven_p = Float64.(proven_solution["p"])

@test size(x_sorted) == size(proven_x)
@test vector_approx_test(vec(x_sorted), vec(proven_x))
@test vector_approx_test(p_sorted, proven_p)
else
# otherwise create the tests
mkpath(dirname(path_solution))

dict_calc_solution = Dict(
"x" => [x_sorted[k, :] for k in 1:size(x_sorted, 1)],
"p" => p_sorted,
)

YAML.write_file(path_solution, dict_calc_solution)
@warn("Preloaded solution not found, then it has been created\nPath: $path_solution")
end

end

# Example definitions declared in test/Examples.jl
list_examples = Examples.list_examples
list_multivariate_examples = Examples.list_multivariate_examples

@testset "tests" begin

    # Univariate examples: one distribution per case
    for example in list_examples
        test_example(example.name, pem, example.d, example.N)
    end

    # Multivariate examples: a vector of independent distributions per case
    for example in list_multivariate_examples
        test_multivariate_example(example.name, pem, example.d, example.N)
    end
end

16 changes: 16 additions & 0 deletions test/testcases/pem_multivariate/Normal2_N2.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
x:
-
- -1.0
- 0.0
- 0.0
- 1.0
-
- 0.0
- -1.0
- 1.0
- 0.0
p:
- 0.25
- 0.25
- 0.25
- 0.25
22 changes: 22 additions & 0 deletions test/testcases/pem_multivariate/Normal2_N3.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
x:
-
- -1.7320508075688776
- 0.0
- 0.0
- 0.0
- 0.0
- 1.7320508075688772
-
- 0.0
- -1.7320508075688776
- 0.0
- 0.0
- 1.7320508075688772
- 0.0
p:
- 0.0833333333333333
- 0.0833333333333333
- 0.33333333333333337
- 0.33333333333333337
- 0.08333333333333331
- 0.08333333333333331
16 changes: 16 additions & 0 deletions test/testcases/pem_multivariate/Normal_TruncNormal_N2.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
x:
-
- -1.0
- 0.0
- 0.0
- 1.0
-
- 1.0070551301947668
- 0.6355198478077098
- 1.4184929618467512
- 1.0070551301947668
p:
- 0.25
- 0.26274342969642217
- 0.23725657030357783
- 0.25
19 changes: 19 additions & 0 deletions test/testcases/pem_multivariate/Normal_TruncNormal_mixed_N.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
x:
-
- -1.0
- 0.0
- 0.0
- 0.0
- 1.0
-
- 1.0070551301947668
- 0.4089295792763922
- 1.0532985603054232
- 1.7295827634987933
- 1.0070551301947668
p:
- 0.25
- 0.10944862748508999
- 0.3204603371606533
- 0.07009103535425674
- 0.25