diff --git a/examples/fixed_source/cooper1/input.py b/examples/fixed_source/cooper1/input.py
index 082b26700..965e564b2 100644
--- a/examples/fixed_source/cooper1/input.py
+++ b/examples/fixed_source/cooper1/input.py
@@ -51,12 +51,20 @@
 mcdc.tally.mesh_tally(
     scores=["flux"],
-    x=np.linspace(0.0, 4.0, 40),
-    y=np.linspace(0.0, 4.0, 40),
+    x=np.linspace(0.0, 4.0, 41),
+    y=np.linspace(0.0, 4.0, 41),
+)
+
+mcdc.tally.cs_tally(
+    N_cs_bins=[150],
+    cs_bin_size=[5.0, 5.0],
+    x=np.linspace(0.0, 4.0, 41),
+    y=np.linspace(0.0, 4.0, 41),
+    scores=["flux"],
 )
 
 # Setting
-mcdc.setting(N_particle=50)
+mcdc.setting(N_particle=1e3)
 mcdc.implicit_capture()
 
 # Run
diff --git a/examples/fixed_source/cooper2/input.py b/examples/fixed_source/cooper2/input.py
index b2363580b..43b2703fc 100644
--- a/examples/fixed_source/cooper2/input.py
+++ b/examples/fixed_source/cooper2/input.py
@@ -43,8 +43,8 @@
 mcdc.tally.mesh_tally(
     scores=["flux"],
-    x=np.linspace(0.0, 4.0, 40),
-    y=np.linspace(0.0, 4.0, 40),
+    x=np.linspace(0.0, 4.0, 41),
+    y=np.linspace(0.0, 4.0, 41),
 )
 
 # Setting
diff --git a/examples/fixed_source/cooper_combo/cs_processing/cs_process.py b/examples/fixed_source/cooper_combo/cs_processing/cs_process.py
new file mode 100644
index 000000000..bd0523d6f
--- /dev/null
+++ b/examples/fixed_source/cooper_combo/cs_processing/cs_process.py
@@ -0,0 +1,102 @@
+import h5py
+import numpy as np
+import matplotlib.pyplot as plt
+import scipy.fft as spfft
+import time
+from mcdc.main import cs_reconstruct, construct_cs_sampling_matrix_S
+
+try:
+    import cvxpy as cp
+except ImportError:
+    print("CVXPY has not been installed. Please install it with 'pip install cvxpy'")
+
+
+def rel_l2_error(recon, real):
+    recon = recon.flatten()
+    real = real.flatten()
+
+    return np.linalg.norm(real - recon, ord=2) / np.linalg.norm(real, ord=2)
+
+
+Nx = 40
+Ny = 40
+with h5py.File("output.h5", "r") as f:
+    S, N_dims, bin_size_pixels = construct_cs_sampling_matrix_S(f, Nx, Ny)
+    N_cs_bins = f["tallies"]["cs_tally_0"]["N_cs_bins"][()]
+    cs_results = f["tallies"]["cs_tally_0"]["flux"]["mean"][:]
+    mesh_results = f["tallies"]["mesh_tally_0"]["flux"]["mean"][:]
+    mesh_sdev = f["tallies"]["mesh_tally_0"]["flux"]["sdev"][:]
+
+lambda_array = [
+    "mesh",
+    0,
+    5e-7,
+    1e-6,
+    5e-6,
+    1e-5,
+    5e-5,
+    1e-4,
+    5e-4,
+    1e-3,
+    5e-3,
+    1e-2,
+    5e-2,
+    1e-1,
+    2e-1,
+    3e-1,
+]
+recon_array = [
+    mesh_results if lam == "mesh" else cs_reconstruct(S, cs_results, lam, Nx, Ny)
+    for lam in lambda_array
+]
+rel_errors = [rel_l2_error(recon, mesh_results) for recon in recon_array]
+
+
+# Plotting the reconstructions for different lambda values
+fig, axes = plt.subplots(4, 4, figsize=(12, 12))
+for i, ax in enumerate(axes.flat):
+    l = lambda_array[i]
+    reconstruction = recon_array[i]
+    im = ax.imshow(reconstruction.T, origin="lower", extent=[0, 4, 0, 4])
+
+    ax.set_title("MC/DC Solution" if l == "mesh" else rf"$\lambda$ = {l:.5g}")
+    ax.set_ylabel("y [cm]" if i % 4 == 0 else "")
+    ax.set_xlabel("x [cm]" if i >= 12 else "")
+
+    # Add colorbar
+    cbar = fig.colorbar(im, ax=ax, orientation="vertical", shrink=1)
+    cbar.formatter.set_powerlimits((0, 0))
+
+plt.suptitle(
+    r"Basis Pursuit Denoising Reconstructions with Different Values of $\lambda$",
+    fontsize=16,
+)
+plt.tight_layout()
+plt.savefig(
+    f"{N_dims}D_cooper_recons_{N_cs_bins}_{bin_size_pixels}{bin_size_pixels}.png"
+)
+plt.show()
+
+
+# Plotting the relative errors
+plt.plot(lambda_array[1:], rel_errors[1:], label="Reconstruction Errors")
+plt.hlines(
+    np.linalg.norm(mesh_sdev.flatten(), ord=2)
+    / np.linalg.norm(mesh_results.flatten(), ord=2),
+    plt.xlim()[0],
+    plt.xlim()[1],
+    color="black",
+    linestyle="--",
+    label="Reference Std Dev",
+)
+plt.xscale("log")
+plt.yscale("log")
+plt.xlabel(r"$\lambda$")
+plt.ylabel("Relative L$^2$ Error")
+plt.title(r"Relative Error vs $\lambda$ - 2D Modified Cooper-Larsen Reconstructions")
+plt.legend()
+plt.tight_layout()
+plt.savefig(
+    f"{N_dims}D_cooper_errors_{N_cs_bins}_{bin_size_pixels}{bin_size_pixels}.png"
+)
+plt.show()
diff --git a/examples/fixed_source/cooper_combo/cs_processing/single_recon_cs_process.py b/examples/fixed_source/cooper_combo/cs_processing/single_recon_cs_process.py
new file mode 100644
index 000000000..ed4a081e0
--- /dev/null
+++ b/examples/fixed_source/cooper_combo/cs_processing/single_recon_cs_process.py
@@ -0,0 +1,28 @@
+import h5py
+import numpy as np
+import matplotlib.pyplot as plt
+import scipy.fft as spfft
+import time
+from mcdc.main import cs_reconstruct, construct_cs_sampling_matrix_S
+
+try:
+    import cvxpy as cp
+except ImportError:
+    print("CVXPY has not been installed. Please install it with 'pip install cvxpy'")
+
+Nx = 40
+Ny = 40
+lambda_ = 1e-4
+with h5py.File("output.h5", "r") as f:
+    S, N_dims, bin_size_pixels = construct_cs_sampling_matrix_S(f, Nx, Ny)
+    N_cs_bins = f["tallies"]["cs_tally_0"]["N_cs_bins"][()]
+    cs_results = f["tallies"]["cs_tally_0"]["flux"]["mean"][:]
+    mesh_results = f["tallies"]["mesh_tally_0"]["flux"]["mean"][:]
+    mesh_sdev = f["tallies"]["mesh_tally_0"]["flux"]["sdev"][:]
+
+start_time = time.time()
+recon = cs_reconstruct(S, cs_results, lambda_, Nx, Ny)
+recon_time = time.time() - start_time
+
+plt.imshow(recon)
+plt.show()
diff --git a/examples/fixed_source/cooper_combo/input.py b/examples/fixed_source/cooper_combo/input.py
index c84c4f4e9..d69a9c28b 100644
--- a/examples/fixed_source/cooper_combo/input.py
+++ b/examples/fixed_source/cooper_combo/input.py
@@ -60,8 +60,16 @@
     y=np.linspace(0.0, 4.0, 41),
 )
 
+mcdc.tally.cs_tally(
+    N_cs_bins=[300],
+    cs_bin_size=[3.0, 3.0],
+    scores=["flux"],
+    x=np.linspace(0.0, 4.0, 41),
+    y=np.linspace(0.0, 4.0, 41),
+)
+
 # Setting
-mcdc.setting(N_particle=1e2)
+mcdc.setting(N_particle=1e3)
 mcdc.implicit_capture()
 
 # Run
diff --git a/examples/fixed_source/cooper_combo/process.py b/examples/fixed_source/cooper_combo/process.py
index 8ab5fad07..7bbfe3c9d 100644
--- a/examples/fixed_source/cooper_combo/process.py
+++ b/examples/fixed_source/cooper_combo/process.py
@@ -2,7 +2,6 @@
 import h5py
 import numpy as np
 
-
 # Load result
 with h5py.File("output.h5", "r") as f:
     tally = f["tallies/mesh_tally_0"]
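A note on the sweep above: every numeric entry of lambda_array is handed to cs_reconstruct (added to mcdc/main.py later in this patch), which solves the basis pursuit denoising problem

    \min_{v}\ \tfrac{1}{2}\,\lVert A v - b \rVert_2^2 + \lambda\,\lVert v \rVert_1 \quad\text{subject to}\quad (A v)_M = b_M

where b holds the coarse-bin tally means, A combines the sampling matrix S with the inverse-DCT sparsifying basis, and the constraint pins the last row M (the bin covering the whole problem) exactly. Larger lambda favors sparser DCT coefficients; the error-vs-lambda plot produced above is how a usable value is chosen.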
diff --git a/examples/fixed_source/kobayashi3-TD/cs_processing/cs_process.py b/examples/fixed_source/kobayashi3-TD/cs_processing/cs_process.py
new file mode 100644
index 000000000..7a12ea853
--- /dev/null
+++ b/examples/fixed_source/kobayashi3-TD/cs_processing/cs_process.py
@@ -0,0 +1,105 @@
+import h5py
+import numpy as np
+import matplotlib.pyplot as plt
+import scipy.fft as spfft
+import time
+from mcdc.main import cs_reconstruct, construct_cs_sampling_matrix_S
+
+try:
+    import cvxpy as cp
+except ImportError:
+    print("CVXPY has not been installed. Please install it with 'pip install cvxpy'")
+
+
+def rel_l2_error(recon, real):
+    recon = recon.flatten()
+    real = real.flatten()
+
+    return np.linalg.norm(real - recon, ord=2) / np.linalg.norm(real, ord=2)
+
+
+Nx = 30
+Ny = 50
+with h5py.File("output.h5", "r") as f:
+    S, N_dims, bin_size_pixels = construct_cs_sampling_matrix_S(
+        f, Nx, Ny, problem_lims=[60, 100, 60]
+    )
+    print(N_dims, bin_size_pixels)
+    N_cs_bins = f["tallies"]["cs_tally_0"]["N_cs_bins"][()]
+    cs_results = f["tallies"]["cs_tally_0"]["flux"]["mean"][:]
+    mesh_results = f["tallies"]["mesh_tally_0"]["flux"]["mean"][:]
+    mesh_sdev = f["tallies"]["mesh_tally_0"]["flux"]["sdev"][:]
+
+lambda_array = [
+    "mesh",
+    0,
+    5e-7,
+    1e-6,
+    5e-6,
+    1e-5,
+    5e-5,
+    1e-4,
+    5e-4,
+    1e-3,
+    5e-3,
+    1e-2,
+    5e-2,
+    1e-1,
+    2e-1,
+    3e-1,
+]
+recon_array = [
+    mesh_results if lam == "mesh" else cs_reconstruct(S, cs_results, lam, Nx, Ny)
+    for lam in lambda_array
+]
+rel_errors = [rel_l2_error(recon, mesh_results) for recon in recon_array]
+
+
+# Plotting the reconstructions for different lambda values
+fig, axes = plt.subplots(4, 4, figsize=(12, 12))
+for i, ax in enumerate(axes.flat):
+    l = lambda_array[i]
+    reconstruction = recon_array[i]
+    im = ax.imshow(reconstruction.T, origin="lower", extent=[0, 60, 0, 100])
+
+    ax.set_title("MC/DC Solution" if l == "mesh" else rf"$\lambda$ = {l:.5g}")
+    ax.set_ylabel("y [cm]" if i % 4 == 0 else "")
+    ax.set_xlabel("x [cm]" if i >= 12 else "")
+
+    # Add colorbar
+    cbar = fig.colorbar(im, ax=ax, orientation="vertical", shrink=1)
+    cbar.formatter.set_powerlimits((0, 0))
+
+plt.suptitle(
+    r"Basis Pursuit Denoising Reconstructions with Different Values of $\lambda$",
+    fontsize=16,
+)
+plt.tight_layout()
+plt.savefig(
+    f"{N_dims}D_kobayashi_recons_{N_cs_bins}_{bin_size_pixels}{bin_size_pixels}.png"
+)
+plt.show()
+
+
+# Plotting the relative errors
+plt.plot(lambda_array[1:], rel_errors[1:], label="Reconstruction Errors")
+plt.hlines(
+    np.linalg.norm(mesh_sdev.flatten(), ord=2)
+    / np.linalg.norm(mesh_results.flatten(), ord=2),
+    plt.xlim()[0],
+    plt.xlim()[1],
+    color="black",
+    linestyle="--",
+    label="Reference Std Dev",
+)
+plt.xscale("log")
+plt.yscale("log")
+plt.xlabel(r"$\lambda$")
+plt.ylabel("Relative L$^2$ Error")
+plt.title(rf"Relative Error vs $\lambda$ - {N_dims}D Kobayashi Reconstructions")
+plt.legend()
+plt.tight_layout()
+plt.savefig(
+    f"{N_dims}D_kobayashi_errors_{N_cs_bins}_{bin_size_pixels}{bin_size_pixels}.png"
+)
+plt.show()
diff --git a/examples/fixed_source/kobayashi3-TD/cs_processing/single_recon_cs_process.py b/examples/fixed_source/kobayashi3-TD/cs_processing/single_recon_cs_process.py
new file mode 100644
index 000000000..6f5c8b8f2
--- /dev/null
+++ b/examples/fixed_source/kobayashi3-TD/cs_processing/single_recon_cs_process.py
@@ -0,0 +1,27 @@
+import h5py
+import numpy as np
+import matplotlib.pyplot as plt
+import scipy.fft as spfft
+import time
+from mcdc.main import cs_reconstruct, construct_cs_sampling_matrix_S
+
+try:
+    import cvxpy as cp
+except ImportError:
+    print("CVXPY has not been installed. Please install it with 'pip install cvxpy'")
+
+Nx = 30
+Ny = 50
+lambda_ = 1e-4
+with h5py.File("output.h5", "r") as f:
+    S, N_dims, bin_size_pixels = construct_cs_sampling_matrix_S(
+        f, Nx, Ny, problem_lims=[60, 100, 60]
+    )
+    N_cs_bins = f["tallies"]["cs_tally_0"]["N_cs_bins"][()]
+    cs_results = f["tallies"]["cs_tally_0"]["flux"]["mean"][:]
+    mesh_results = f["tallies"]["mesh_tally_0"]["flux"]["mean"][:]
+    mesh_sdev = f["tallies"]["mesh_tally_0"]["flux"]["sdev"][:]
+
+start_time = time.time()
+recon = cs_reconstruct(S, cs_results, lambda_, Nx, Ny)
+recon_time = time.time() - start_time
diff --git a/examples/fixed_source/kobayashi3-TD/input.py b/examples/fixed_source/kobayashi3-TD/input.py
index 98bef5e10..65cb4a320 100644
--- a/examples/fixed_source/kobayashi3-TD/input.py
+++ b/examples/fixed_source/kobayashi3-TD/input.py
@@ -60,26 +60,16 @@
     scores=["flux"],
     x=np.linspace(0.0, 60.0, 31),
     y=np.linspace(0.0, 100.0, 51),
-    # t=np.linspace(0.0, 200.0, 21),
-    # g=np.array([-0.5, 3.5, 6.5])  # fast (0, 1, 2, 3) and thermal (4, 5, 6) groups
 )
 
-mcdc.tally.cell_tally(source_cell, scores=["flux"])
-mcdc.tally.cell_tally(void_cell, scores=["flux"])
-mcdc.tally.cell_tally(shield_cell, scores=["flux"])
-
-
 mcdc.tally.cs_tally(
-    N_cs_bins=[150],
-    cs_bin_size=[8.0, 8.0],
+    N_cs_bins=[1000],
+    cs_bin_size=np.array([3.0, 3.0]),
     x=np.linspace(0.0, 60.0, 31),
     y=np.linspace(0.0, 100.0, 51),
     scores=["flux"],
 )
 
-
 # Setting
-mcdc.setting(N_particle=1e2)
+mcdc.setting(N_particle=1e3)
 
 # Run
 mcdc.run()
diff --git a/examples/fixed_source/kobayashi3-TD/process.py b/examples/fixed_source/kobayashi3-TD/process.py
index 211b2d891..3c8223d03 100644
--- a/examples/fixed_source/kobayashi3-TD/process.py
+++ b/examples/fixed_source/kobayashi3-TD/process.py
@@ -4,17 +4,12 @@
 import h5py
 import matplotlib.animation as animation
 
-
 # =============================================================================
 # Plot results
 # =============================================================================
 
 # Results
 with h5py.File("output.h5", "r") as f:
-    cs_recon = f["tallies/cs_tally_0/flux/reconstruction"][:]
-    plt.imshow(cs_recon)
-    plt.show()
-
     tallies = f["tallies/mesh_tally_0"]
     flux = tallies["flux"]
     grid = tallies["grid"]
@@ -29,8 +24,7 @@
     phi = flux["mean"][:]
     phi_sd = flux["sdev"][:]
 
-    for i in range(len(f["input_deck"]["cell_tallies"])):
-        flux_score = f[f"tallies/cell_tally_{i}/flux"]
-        print(
-            f'cell {i+1} mean = {flux_score["mean"][()]}, sdev = {flux_score["sdev"][()]}'
-        )
+    print(len(x))
+
+    plt.imshow(phi[:, :, 0])
+    plt.show()
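The processing scripts above only print a warning when CVXPY is missing and then fail later with a NameError the first time cp is used. A fail-fast variant of that guard, shown here as a sketch rather than as part of this patch, would be:

    # Sketch of a stricter alternative to the warn-and-continue guard above
    try:
        import cvxpy as cp
    except ImportError as e:
        raise ImportError(
            "CVXPY is required for compressed sensing post-processing; "
            "install it with 'pip install cvxpy'"
        ) from e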
diff --git a/examples/fixed_source/sphere_in_cube/cs_processing/cs_process.py b/examples/fixed_source/sphere_in_cube/cs_processing/cs_process.py
new file mode 100644
index 000000000..2b56a75be
--- /dev/null
+++ b/examples/fixed_source/sphere_in_cube/cs_processing/cs_process.py
@@ -0,0 +1,131 @@
+import h5py
+import numpy as np
+import matplotlib.pyplot as plt
+import scipy.fft as spfft
+import time
+from mcdc.main import cs_reconstruct, construct_cs_sampling_matrix_S
+
+try:
+    import cvxpy as cp
+except ImportError:
+    print("CVXPY has not been installed. Please install it with 'pip install cvxpy'")
+
+
+def rel_l2_error(recon, real):
+    recon = recon.flatten()
+    real = real.flatten()
+
+    return np.linalg.norm(real - recon, ord=2) / np.linalg.norm(real, ord=2)
+
+
+# The user only needs to change these dimensions; they dictate the resolution of the reconstruction.
+# This file assumes that MC/DC was run with compressed sensing and a mesh tally,
+# and uses both to find the relative error of each reconstruction.
+# In this case, the size of the reconstruction should match the size of the mesh tally.
+Nx = 20
+Ny = 20
+Nz = 20
+
+
+# Everything after this doesn't need to be touched
+with h5py.File("../output.h5", "r") as f:
+    # Reading in the data from the output file to perform the compressed sensing reconstruction
+    S, N_dims, bin_size_pixels = construct_cs_sampling_matrix_S(f, Nx, Ny, Nz)
+    N_cs_bins = f["tallies"]["cs_tally_0"]["N_cs_bins"][()]
+    cs_results = f["tallies"]["cs_tally_0"]["fission"]["mean"][:]
+    mesh_results = f["tallies"]["mesh_tally_0"]["fission"]["mean"][:]
+    mesh_sdev = f["tallies"]["mesh_tally_0"]["fission"]["sdev"][:]
+
+
+# Different values of lambda (the sparsity parameter in basis pursuit denoising)
+lambda_array = [
+    "mesh",
+    0,
+    5e-7,
+    1e-6,
+    5e-6,
+    1e-5,
+    5e-5,
+    1e-4,
+    5e-4,
+    1e-3,
+    5e-3,
+    1e-2,
+    5e-2,
+    1e-1,
+    2e-1,
+    3e-1,
+]
+
+# Finding the reconstructions and relative errors
+recon_array = [
+    mesh_results if lam == "mesh" else cs_reconstruct(S, cs_results, lam, Nx, Ny, Nz)
+    for lam in lambda_array
+]
+rel_errors = [rel_l2_error(recon, mesh_results) for recon in recon_array]
+
+
+# Plotting the reconstructions for different lambda values
+fig, axes = plt.subplots(4, 4, figsize=(12, 12))
+for i, ax in enumerate(axes.flat):
+    l = lambda_array[i]
+    reconstruction = recon_array[i]
+    if N_dims == 2:
+        im = ax.imshow(reconstruction, origin="lower", extent=[0, 4, 0, 4])
+    elif N_dims == 3:
+        im = ax.imshow(
+            reconstruction[:, :, Nz // 2], origin="lower", extent=[0, 4, 0, 4]
+        )
+
+    ax.set_title("MC/DC Solution" if l == "mesh" else rf"$\lambda$ = {l:.5g}")
+    ax.set_ylabel("y [cm]" if i % 4 == 0 else "")
+    ax.set_xlabel("x [cm]" if i >= 12 else "")
+
+    # Add colorbar
+    cbar = fig.colorbar(im, ax=ax, orientation="vertical", shrink=1)
+    cbar.formatter.set_powerlimits((0, 0))
+
+plt.suptitle(
+    r"Basis Pursuit Denoising Reconstructions with Different Values of $\lambda$",
+    fontsize=16,
+)
+plt.tight_layout()
+if N_dims == 2:
+    plt.savefig(
+        f"{N_dims}D_sphere_recons_{N_cs_bins}_{bin_size_pixels}{bin_size_pixels}.png"
+    )
+elif N_dims == 3:
+    plt.savefig(
+        f"{N_dims}D_sphere_recons_{N_cs_bins}_{bin_size_pixels}{bin_size_pixels}{bin_size_pixels}.png"
+    )
+plt.show()
+
+
+# Plotting the relative errors
+plt.plot(lambda_array[1:], rel_errors[1:], label="Reconstruction Errors")
+plt.hlines(
+    np.linalg.norm(mesh_sdev.flatten(), ord=2)
+    / np.linalg.norm(mesh_results.flatten(), ord=2),
+    plt.xlim()[0],
+    plt.xlim()[1],
+    color="black",
+    linestyle="--",
+    label="Reference Std Dev",
+)
+plt.xscale("log")
+plt.yscale("log")
+plt.xlabel(r"$\lambda$")
+plt.ylabel("Relative L$^2$ Error")
+plt.title(rf"Relative Error vs $\lambda$ - {N_dims}D Sphere Reconstructions")
+plt.legend()
+plt.tight_layout()
+if N_dims == 2:
+    plt.savefig(
+        f"{N_dims}D_sphere_errors_{N_cs_bins}_{bin_size_pixels}{bin_size_pixels}.png"
+    )
+elif N_dims == 3:
+    plt.savefig(
+        f"{N_dims}D_sphere_errors_{N_cs_bins}_{bin_size_pixels}{bin_size_pixels}{bin_size_pixels}.png"
+    )
+
+plt.show()
diff --git a/examples/fixed_source/sphere_in_cube/cs_processing/single_recon_cs_process.py b/examples/fixed_source/sphere_in_cube/cs_processing/single_recon_cs_process.py
new file mode 100644
index 000000000..4deccc019
--- /dev/null
+++ b/examples/fixed_source/sphere_in_cube/cs_processing/single_recon_cs_process.py
@@ -0,0 +1,35 @@
+import h5py
+import numpy as np
+import matplotlib.pyplot as plt
+import scipy.fft as spfft
+import time
+from mcdc.main import cs_reconstruct, construct_cs_sampling_matrix_S
+
+try:
+    import cvxpy as cp
+except ImportError:
+    print("CVXPY has not been installed. Please install it with 'pip install cvxpy'")
+
+
+# This script just provides a single reconstruction, based on the dimensions of
+# x and y (from Nx and Ny), and the lambda_ value. A search for an optimal lambda_
+# may be necessary, in which case cs_process.py may be of use.
+
+Nx = 40
+Ny = 40
+lambda_ = 1e-3
+
+# Don't touch anything below this
+with h5py.File("output.h5", "r") as f:
+    # Obtaining the sampling matrix from the problem parameters
+    S, N_dims, bin_size_pixels = construct_cs_sampling_matrix_S(f, Nx, Ny)
+    N_cs_bins = f["tallies"]["cs_tally_0"]["N_cs_bins"][()]
+
+    # The measurement vector
+    cs_results = f["tallies"]["cs_tally_0"]["fission"]["mean"][:]
+
+# Performing the reconstruction
+recon = cs_reconstruct(S, cs_results, lambda_, Nx, Ny)
+
+plt.imshow(recon)
+plt.show()
diff --git a/examples/fixed_source/sphere_in_cube/input.py b/examples/fixed_source/sphere_in_cube/input.py
index 3a8585823..b6767e21f 100644
--- a/examples/fixed_source/sphere_in_cube/input.py
+++ b/examples/fixed_source/sphere_in_cube/input.py
@@ -42,18 +42,16 @@
     scores=["fission"],
     x=np.linspace(0.0, 4.0, 41),
     y=np.linspace(0.0, 4.0, 41),
-    # z=np.linspace(0.0, 4.0, 41),
-    # t=np.linspace(0.0, 200.0, 2),
+    # z=np.linspace(0.0, 4.0, 21),
 )
 
-mcdc.tally.cell_tally(sphere_cell, scores=["fission"])
-
 mcdc.tally.cs_tally(
-    N_cs_bins=[150],
+    N_cs_bins=[300],
     cs_bin_size=np.array([3.0, 3.0]),
+    scores=["fission"],
     x=np.linspace(0.0, 4.0, 41),
     y=np.linspace(0.0, 4.0, 41),
-    scores=["fission"],
+    # z=np.linspace(0.0, 4.0, 21),
 )
 
 # Setting
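In the input decks above, cs_bin_size is given in fine-mesh pixels; prepare() in mcdc/main.py (later in this patch) rescales it to problem units, and construct_cs_sampling_matrix_S converts it back when post-processing. A worked example of that arithmetic for the cooper/sphere decks:

    import numpy as np

    x = np.linspace(0.0, 4.0, 41)    # 40 fine cells spanning 4 cm
    bin_px = 3.0                     # cs_bin_size entry from the input deck, in pixels
    bin_cm = bin_px / (len(x) - 1) * (x[-1] - x[0])
    print(bin_cm)                    # 0.3 cm per bin side
    print(int(bin_cm * 40 / x[-1]))  # back to 3 pixels, as in construct_cs_sampling_matrix_S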
diff --git a/examples/fixed_source/sphere_in_cube/process.py b/examples/fixed_source/sphere_in_cube/process.py
index c77451d04..b27e5cd4f 100644
--- a/examples/fixed_source/sphere_in_cube/process.py
+++ b/examples/fixed_source/sphere_in_cube/process.py
@@ -1,62 +1,12 @@
 import h5py
 import numpy as np
 import matplotlib.pyplot as plt
-import scipy.fft as spfft
-import cvxpy as cp
-
-# Note: there are some lines in main.py with np.save(...) that I added
-# for ease of post-processing, like getting the center points used and
-# the sampling matrix S. None are required for the input file and this
-# script to run, but may be useful for debugging purposes
 
 with h5py.File("output.h5", "r") as f:
-    S = f["tallies"]["cs_tally_0"]["S"][:]
-    recon = f["tallies"]["cs_tally_0"]["fission"]["reconstruction"]
-    plt.imshow(recon)
-    plt.title("Reconstruction, $\lambda$ = 0.5")  # assuming l in main.py remains at 0.5
-    plt.colorbar()
-    plt.show()
-
-    cs_results = f["tallies"]["cs_tally_0"]["fission"]["mean"][:]
     mesh_results = f["tallies"]["mesh_tally_0"]["fission"]["mean"][:]
-    plt.imshow(mesh_results)
-    plt.title("mesh results")
+    plt.imshow(mesh_results, extent=[0, 4, 0, 4])
+    plt.xlabel("x [cm]")
+    plt.ylabel("y [cm]")
+    plt.title("Fission Results")
     plt.colorbar()
     plt.show()
-
-Nx = 40
-Ny = 40
-N_fine_cells = Nx * Ny
-
-# Can use this for post-processing
-# mesh_b = S @ mesh_results.flatten()
-# b = mesh_b
-
-# Use this for analyzing the in-situ results
-cs_b = cs_results
-b = cs_b
-
-# Constructing T and A
-idct_basis_x = spfft.idct(np.identity(Nx), axis=0)
-idct_basis_y = spfft.idct(np.identity(Ny), axis=0)
-
-T_inv = np.kron(idct_basis_y, idct_basis_x)
-A = S @ T_inv
-
-# Basis pursuit denoising solver - change l to get different results
-vx = cp.Variable(N_fine_cells)
-l = 10
-objective = cp.Minimize(0.5 * cp.norm(A @ vx - b, 2) + l * cp.norm(vx, 1))
-prob = cp.Problem(objective)
-result = prob.solve(verbose=False)
-sparse_solution = np.array(vx.value).squeeze()
-
-# Obtaining the reconstruction
-recon = T_inv @ sparse_solution
-recon_reshaped = recon.reshape(Ny, Nx)
-
-plt.imshow(recon_reshaped)
-plt.title(f"Reconstruction, $\lambda$ = {l}")
-plt.colorbar()
-plt.show()
diff --git a/mcdc/card.py b/mcdc/card.py
index 91af4a9a8..8f3183f65 100644
--- a/mcdc/card.py
+++ b/mcdc/card.py
@@ -424,6 +424,6 @@ def __init__(self):
         self.x = np.array([-INF, INF])
         self.y = np.array([-INF, INF])
         self.z = np.array([-INF, INF])
-        self.N_bin = 1
         self.N_cs_bins = 1
-        self.cs_bin_size = np.array([1.0, 1.0])
+        self.cs_bin_size = np.array([1.0, 1.0, 1.0])
+        self.N_bin = 1
diff --git a/mcdc/kernel.py b/mcdc/kernel.py
index 8305716e7..c5d559d61 100644
--- a/mcdc/kernel.py
+++ b/mcdc/kernel.py
@@ -2137,11 +2137,12 @@ def score_cs_tally(P_arr, distance, tally, data, mcdc):
     material = mcdc["materials"][P["material_ID"]]
 
     N_cs_bins = tally["filter"]["N_cs_bins"]
-    cs_bin_size = tally["filter"]["cs_bin_size"]
     cs_centers = tally["filter"]["cs_centers"]
+    cs_bin_size = tally["filter"]["cs_bin_size"]
 
     stride = tally["stride"]
-    bin_idx = stride["tally"]
+    bin_starting_idx = stride["tally"]
+    # bin_idx = stride["tally"]
 
     # Particle 4D direction
     ux = P["ux"]
@@ -2161,28 +2162,32 @@
     # Check each coarse bin
     for j in range(N_cs_bins):
-        center = adapt.local_array(2, type_.float64)
-        start = adapt.local_array(2, type_.float64)
-        end = adapt.local_array(2, type_.float64)
-        #
+        center = adapt.local_array(3, type_.float64)
+        start = adapt.local_array(3, type_.float64)
+        end = adapt.local_array(3, type_.float64)
+
         center[0] = cs_centers[0][j]
         center[1] = cs_centers[1][j]
+        center[2] = cs_centers[2][j]
 
         start[0] = x
         start[1] = y
+        start[2] = z
 
         end[0] = x_final
         end[1] = y_final
+        end[2] = z_final
 
         distance_inside = calculate_distance_in_coarse_bin(
-            start, end, distance, center, cs_bin_size
+            start, end, center, cs_bin_size
        )
 
         # Last bin covers the whole problem
         if j == N_cs_bins - 1:
-            cs_bin_size_full_problem = adapt.local_array(2, type_.float64)
+            cs_bin_size_full_problem = adapt.local_array(3, type_.float64)
             cs_bin_size_full_problem[0] = INF
             cs_bin_size_full_problem[1] = INF
+            cs_bin_size_full_problem[2] = INF
 
             distance_inside = calculate_distance_in_coarse_bin(
-                start, end, distance, center, cs_bin_size_full_problem
+                start, end, center, cs_bin_size_full_problem
             )
 
         if distance < distance_inside:
@@ -2207,13 +2212,14 @@
             adapt.global_add(
                 tally_bin,
-                (TALLY_SCORE, bin_idx + j * tally["N_score"] + i),
+                (TALLY_SCORE, bin_starting_idx + j * tally["N_score"] + i),
                 round(score),
             )
 
 
 @njit
 def cs_clip(p, q, t0, t1):
+    # Core of the Liang-Barsky algorithm
     if p < 0:
         t = q / p
         if t > t1:
@@ -2232,49 +2238,61 @@
 @njit
-def cs_tracklength_in_box(start, end, x_min, x_max, y_min, y_max):
-    # Uses Liang-Barsky algorithm for finding tracklength in box
-    t0, t1 = 0.0, 1.0
+def cs_tracklength_in_box(start, end, x_min, x_max, y_min, y_max, z_min, z_max):
+    # parametric version of the tracklength, ranging from t0 -> t1
+    t0 = 0.0
+    t1 = 1.0
 
     dx = end[0] - start[0]
     dy = end[1] - start[1]
+    dz = end[2] - start[2]
+
+    ps = np.array([-dx, dx, -dy, dy, -dz, dz])
+    qs = np.array(
+        [
+            start[0] - x_min,
+            x_max - start[0],
+            start[1] - y_min,
+            y_max - start[1],
+            start[2] - z_min,
+            z_max - start[2],
+        ]
+    )
 
-    # Perform clipping for each boundary
-    result, t0, t1 = cs_clip(-dx, start[0] - x_min, t0, t1)
-    if not result:
-        return 0.0
-    result, t0, t1 = cs_clip(dx, x_max - start[0], t0, t1)
-    if not result:
-        return 0.0
-    result, t0, t1 = cs_clip(-dy, start[1] - y_min, t0, t1)
-    if not result:
-        return 0.0
-    result, t0, t1 = cs_clip(dy, y_max - start[1], t0, t1)
-    if not result:
-        return 0.0
-
-    # Update start and end points based on clipping results
-    if t1 < 1:
-        end[0] = start[0] + t1 * dx
-        end[1] = start[1] + t1 * dy
-    if t0 > 0:
-        start[0] = start[0] + t0 * dx
-        start[1] = start[1] + t0 * dx
-
-    # Return the norm
-    X = end[0] - start[0]
-    Y = end[1] - start[1]
-    return math.sqrt(X**2 + Y**2)
+    # clip each point on each face
+    for i in range(6):
+        p = ps[i]
+        q = qs[i]
+        result, t0, t1 = cs_clip(p, q, t0, t1)
+        if not result:
+            return 0.0
+
+    # clipped start points (s) and end points (e)
+    sx = start[0] + t0 * dx
+    sy = start[1] + t0 * dy
+    sz = start[2] + t0 * dz
+    ex = start[0] + t1 * dx
+    ey = start[1] + t1 * dy
+    ez = start[2] + t1 * dz
+
+    X = ex - sx
+    Y = ey - sy
+    Z = ez - sz
+
+    return np.sqrt(X**2 + Y**2 + Z**2)
 
 
 @njit
-def calculate_distance_in_coarse_bin(start, end, distance, center, cs_bin_size):
-    # Edges of the coarse bin
+def calculate_distance_in_coarse_bin(start, end, center, cs_bin_size):
     x_min = center[0] - cs_bin_size[0] / 2
     x_max = center[0] + cs_bin_size[0] / 2
     y_min = center[1] - cs_bin_size[1] / 2
     y_max = center[1] + cs_bin_size[1] / 2
+    z_min = center[2] - cs_bin_size[2] / 2
+    z_max = center[2] + cs_bin_size[2] / 2
 
-    distance_inside = cs_tracklength_in_box(start, end, x_min, x_max, y_min, y_max)
+    distance_inside = cs_tracklength_in_box(
+        start, end, x_min, x_max, y_min, y_max, z_min, z_max
+    )
 
     return distance_inside
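cs_clip and cs_tracklength_in_box above implement Liang-Barsky clipping, now extended to three dimensions: the segment from a particle's start point to its end point is clipped against each face of an axis-aligned coarse bin, and the length of the surviving parametric interval is the track length inside the bin. A self-contained sketch of the same idea (illustrative names, plain Python rather than the @njit kernels):

    import numpy as np

    def tracklength_in_box(start, end, lo, hi):
        # Liang-Barsky: clip the parametric segment start + t*(end - start),
        # t in [0, 1], against the slabs of the box [lo, hi].
        t0, t1 = 0.0, 1.0
        d = end - start
        for axis in range(3):
            for p, q in ((-d[axis], start[axis] - lo[axis]),
                         (d[axis], hi[axis] - start[axis])):
                if p == 0.0:
                    if q < 0.0:  # parallel to this slab and outside it
                        return 0.0
                else:
                    t = q / p
                    if p < 0.0:
                        if t > t1:
                            return 0.0
                        t0 = max(t0, t)
                    else:
                        if t < t0:
                            return 0.0
                        t1 = min(t1, t)
        return float(np.linalg.norm((t1 - t0) * d))

    # Example: the clipped diagonal through the unit cube has length sqrt(3)
    print(tracklength_in_box(np.array([-0.5, -0.5, -0.5]),
                             np.array([1.5, 1.5, 1.5]),
                             np.zeros(3), np.ones(3)))  # ~1.7321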
mcdc["cs_tallies"]["filter"]["N_cs_bins"][0] - if N_cs_bins != 0: - cs_reconstruct(data, mcdc) - # Output: generate hdf5 output files output_start = MPI.Wtime() generate_hdf5(data, mcdc) @@ -104,122 +100,6 @@ def run(): closeout(mcdc) -def calculate_cs_A(data, mcdc): - x_grid = mcdc["mesh_tallies"]["filter"]["x"][0] - y_grid = mcdc["mesh_tallies"]["filter"]["y"][0] - Nx = len(x_grid) - 1 - Ny = len(y_grid) - 1 - - N_cs_bins = mcdc["cs_tallies"]["filter"]["N_cs_bins"][0] - cs_bin_size = mcdc["cs_tallies"]["filter"]["cs_bin_size"][0] - - S = [[] for _ in range(N_cs_bins)] - - [x_centers, y_centers] = mcdc["cs_tallies"]["filter"]["cs_centers"][0] - x_centers[-1] = (x_grid[-1] + x_grid[0]) / 2 - y_centers[-1] = (y_grid[-1] + y_grid[0]) / 2 - - # Calculate the overlap grid for each bin, and flatten into a row of S - for ibin in range(N_cs_bins): - if ibin == N_cs_bins - 1: - # could just change to -INF, INF - cs_bin_size = np.array([x_grid[-1] + x_grid[0], y_grid[-1] + y_grid[0]]) - - bin_x_min = x_centers[ibin] - cs_bin_size[0] / 2 - bin_x_max = x_centers[ibin] + cs_bin_size[0] / 2 - bin_y_min = y_centers[ibin] - cs_bin_size[1] / 2 - bin_y_max = y_centers[ibin] + cs_bin_size[1] / 2 - - overlap = np.zeros((len(y_grid) - 1, len(x_grid) - 1)) - - for i in range(len(y_grid) - 1): - for j in range(len(x_grid) - 1): - cell_x_min = x_grid[j] - cell_x_max = x_grid[j + 1] - cell_y_min = y_grid[i] - cell_y_max = y_grid[i + 1] - - # Calculate overlap in x and y directions - overlap_x = np.maximum( - 0, - np.minimum(bin_x_max, cell_x_max) - - np.maximum(bin_x_min, cell_x_min), - ) - overlap_y = np.maximum( - 0, - np.minimum(bin_y_max, cell_y_max) - - np.maximum(bin_y_min, cell_y_min), - ) - - # Calculate fractional overlap - cell_area = (cell_x_max - cell_x_min) * (cell_y_max - cell_y_min) - overlap[i, j] = (overlap_x * overlap_y) / cell_area - - S[ibin] = overlap.flatten() - S = np.array(S) - mcdc["cs_tallies"]["filter"]["cs_S"] = S - - assert np.allclose(S[-1], np.ones(Nx * Ny)), "Last row of S must be all ones" - assert S.shape[1] == Nx * Ny, "Size of S must match Nx * Ny." - assert ( - S.shape[1] == mcdc["cs_tallies"]["N_bin"][0] - ), "Size of S must match number of cells in desired mesh tally" - - # TODO: can this be done in a different way? 
idk - # Construct the DCT matrix T - idct_basis_x = spfft.idct(np.identity(Nx), axis=0) - idct_basis_y = spfft.idct(np.identity(Ny), axis=0) - - T_inv = np.kron(idct_basis_y, idct_basis_x) - A = S @ T_inv - return A, T_inv - - -def calculate_cs_sparse_solution(data, mcdc, A, b): - N_fine_cells = mcdc["cs_tallies"]["N_bin"][0] - - # setting up the problem with CVXPY - vx = cp.Variable(N_fine_cells) - - # Basis pursuit denoising - l = 0.5 - objective = cp.Minimize(0.5 * cp.norm(A @ vx - b, 2) + l * cp.norm(vx, 1)) - prob = cp.Problem(objective) - result = prob.solve(verbose=False) - - # # Basis pursuit - # objective = cp.Minimize(cp.norm(vx, 1)) - # constraints = [A @ vx == b] - # prob = cp.Problem(objective, constraints) - # result = prob.solve(verbose=True) - # print(f'vx.value = {vx.value}') - - # formatting the sparse solution - sparse_solution = np.array(vx.value).squeeze() - - return sparse_solution - - -def cs_reconstruct(data, mcdc): - tally_bin = data[TALLY] - tally = mcdc["cs_tallies"][0] - stride = tally["stride"] - bin_idx = stride["tally"] - N_cs_bins = tally["filter"]["N_cs_bins"] - Nx = len(mcdc["mesh_tallies"]["filter"]["x"][0]) - 1 - Ny = len(mcdc["mesh_tallies"]["filter"]["y"][0]) - 1 - - b = tally_bin[TALLY_SUM, bin_idx : bin_idx + N_cs_bins] - - A, T_inv = calculate_cs_A(data, mcdc) - x = calculate_cs_sparse_solution(data, mcdc, A, b) - - recon = T_inv @ x - recon_reshaped = recon.reshape(Ny, Nx) - - tally["filter"]["cs_reconstruction"] = recon_reshaped - - # ============================================================================= # utilities for handling discrepancies between input and program types # ============================================================================= @@ -426,28 +306,6 @@ def dd_mesh_bounds(idx): return mesh_xn, mesh_xp, mesh_yn, mesh_yp, mesh_zn, mesh_zp -def generate_cs_centers(mcdc, N_dim=3, seed=123456789): - N_cs_bins = int(mcdc["cs_tallies"]["filter"]["N_cs_bins"]) - x_lims = ( - mcdc["cs_tallies"]["filter"]["x"][0][-1], - mcdc["cs_tallies"]["filter"]["x"][0][0], - ) - y_lims = ( - mcdc["cs_tallies"]["filter"]["y"][0][-1], - mcdc["cs_tallies"]["filter"]["y"][0][0], - ) - - # Generate Halton sequence according to the seed - halton_seq = Halton(d=N_dim, seed=seed) - points = halton_seq.random(n=N_cs_bins) - - # Extract x and y coordinates as tuples separately, scaled to the problem dimensions - x_coords = tuple(points[:, 0] * (x_lims[1] - x_lims[0]) + x_lims[0]) - y_coords = tuple(points[:, 1] * (y_lims[1] - y_lims[0]) + y_lims[0]) - - return (x_coords, y_coords) - - def prepare(): """ Preparing the MC transport simulation: @@ -1089,23 +947,39 @@ def prepare(): # CS tallies for i in range(N_cs_tally): # Direct assignment - copy_field(mcdc["cs_tallies"][i], input_deck.cs_tallies[i], "N_bin") - mcdc["cs_tallies"][i]["filter"]["N_cs_bins"] = input_deck.cs_tallies[ i ].N_cs_bins[0] - mcdc["cs_tallies"][i]["filter"]["cs_bin_size"] = input_deck.cs_tallies[ - i - ].cs_bin_size[0] + + copy_field(mcdc["cs_tallies"][i], input_deck.cs_tallies[i], "N_bin") # Filters (variables with possible different sizes) if not input_deck.technique["domain_decomposition"]: - for name in ["x", "y", "z", "t", "mu", "azi", "g"]: + for name in ["x", "y", "z"]: N = len(getattr(input_deck.cs_tallies[i], name)) mcdc["cs_tallies"][i]["filter"][name][:N] = getattr( input_deck.cs_tallies[i], name ) + # Scaling the bin to be in terms of problem units, not pixels/voxels/etc. 
+ x_grid = mcdc["cs_tallies"][i]["filter"]["x"] + y_grid = mcdc["cs_tallies"][i]["filter"]["y"] + z_grid = mcdc["cs_tallies"][i]["filter"]["z"] + + x_bin_size = input_deck.cs_tallies[i].cs_bin_size[0] + y_bin_size = input_deck.cs_tallies[i].cs_bin_size[1] + z_bin_size = input_deck.cs_tallies[i].cs_bin_size[2] + + x_bin_size = x_bin_size / (len(x_grid) - 1) * (x_grid[-1] - x_grid[0]) + y_bin_size = y_bin_size / (len(y_grid) - 1) * (y_grid[-1] - y_grid[0]) + z_bin_size = z_bin_size / (len(z_grid) - 1) * (z_grid[-1] - z_grid[0]) + + mcdc["cs_tallies"][i]["filter"]["cs_bin_size"] = [ + x_bin_size, + y_bin_size, + z_bin_size, + ] + mcdc["cs_tallies"][i]["filter"]["cs_centers"] = generate_cs_centers(mcdc) # Set tally scores @@ -1126,12 +1000,32 @@ def prepare(): score_type = SCORE_NET_CURRENT mcdc["cs_tallies"][i]["scores"][j] = score_type + # Filter grid sizes + Nx = len(input_deck.cs_tallies[i].x) - 1 + Ny = len(input_deck.cs_tallies[i].x) - 1 + Nz = len(input_deck.cs_tallies[i].x) - 1 + mcdc["cs_tallies"][i]["filter"]["Nx"] = Nx + mcdc["cs_tallies"][i]["filter"]["Ny"] = Ny + mcdc["cs_tallies"][i]["filter"]["Nz"] = Nz + # Update N_bin mcdc["cs_tallies"][i]["N_bin"] *= N_score + # Filter strides + stride = N_score + if Nz > 1: + mcdc["cs_tallies"][i]["stride"]["z"] = stride + stride *= Nz + if Ny > 1: + mcdc["cs_tallies"][i]["stride"]["y"] = stride + stride *= Ny + if Nx > 1: + mcdc["cs_tallies"][i]["stride"]["x"] = stride + stride *= Nx + # Set tally stride and accumulate total tally size mcdc["cs_tallies"][i]["stride"]["tally"] = tally_size - tally_size += mcdc["cs_tallies"][i]["filter"]["N_cs_bins"] + tally_size += mcdc["cs_tallies"][i]["N_bin"] # Set tally data if not input_deck.technique["uq"]: @@ -1827,6 +1721,241 @@ def dd_mergemesh(mcdc, data): return dd_mesh +# ====================================================================================== +# Compressed sensing functions +# ====================================================================================== +def generate_cs_centers(mcdc, seed=123456789): + """ + Generates random points with scipy.stats.qmc.Halton + """ + N_cs_bins = int(mcdc["cs_tallies"]["filter"]["N_cs_bins"]) + + # Taking the limits of the problem + x_lims = ( + mcdc["cs_tallies"]["filter"]["x"][0][-1], + mcdc["cs_tallies"]["filter"]["x"][0][0], + ) + y_lims = ( + mcdc["cs_tallies"]["filter"]["y"][0][-1], + mcdc["cs_tallies"]["filter"]["y"][0][0], + ) + z_lims = ( + mcdc["cs_tallies"]["filter"]["z"][0][-1], + mcdc["cs_tallies"]["filter"]["z"][0][0], + ) + + if z_lims != (INF, -INF): + N_dim = 3 + else: + N_dim = 2 + + # Generate Halton sequence according to the seed + halton_seq = Halton(d=N_dim, seed=seed) + points = halton_seq.random(n=N_cs_bins) + + # Extract x/y/z coordinates as tuples separately, scaled to the problem dimensions + x_coords = points[:, 0] * (x_lims[1] - x_lims[0]) + x_lims[0] + y_coords = points[:, 1] * (y_lims[1] - y_lims[0]) + y_lims[0] + + # Last bin is in exact center + x_coords[-1] = (x_lims[0] + x_lims[-1]) / 2 + y_coords[-1] = (y_lims[0] + y_lims[-1]) / 2 + + x_coords = tuple(x_coords) + y_coords = tuple(y_coords) + + if N_dim == 2: + z_coords = [0] * len(x_coords) + elif N_dim == 3: + z_coords = points[:, 2] * (z_lims[1] - z_lims[0]) + z_lims[0] + z_coords[-1] = (z_lims[0] + z_lims[-1]) / 2 + z_coords = tuple(z_coords) + return (x_coords, y_coords, z_coords) + + +def construct_cs_sampling_matrix_S( + output_file, Nx, Ny, Nz=1, problem_lims=[4.0, 4.0, 4.0] +): + """ + Generates the sampling matrix for compressed sensing use. 
+ + Args: + output_file (string): The output file which stores the necessary data to generate S. + Nx (int): The number of fine-mesh cells in the x-direction. + Ny (int): The number of fine-mesh cells in the y-direction. + Nz (int, optional): The number of fine-mesh cells in the z-direction. Default is 1 for 2D problems. + problem_lims (np.ndarray, optional): The physical size of the problem. Default is [4.0, 4.0, 4.0] + + Returns: + S (np.ndarray): The sampling matrix, dictates how the system was sampled. + N_dims (int): Returns 2 or 3, depending on the number of dimensions in the problem. + bin_size_pixels (int): Size of the compressed sensing bins in units of pixels (fine-mesh cells). + """ + + f = output_file + N_cs_bins = f["tallies"]["cs_tally_0"]["N_cs_bins"][()] + cs_bin_size = f["tallies"]["cs_tally_0"]["cs_bin_size"] + centers = f["tallies"]["cs_tally_0"]["center_points"] + + # size should default to INF - (-INF) in 2d case + if cs_bin_size[:][2] == 2e20: + N_dims = 2 + else: + N_dims = 3 + + if N_dims == 2: + x_grid = np.linspace(0.0, problem_lims[0], Nx + 1) + y_grid = np.linspace(0.0, problem_lims[1], Ny + 1) + N_cs_bins = f["tallies"]["cs_tally_0"]["N_cs_bins"][()] + cs_bin_size = f["tallies"]["cs_tally_0"]["cs_bin_size"] + x_centers = f["tallies"]["cs_tally_0"]["center_points"][0] + y_centers = f["tallies"]["cs_tally_0"]["center_points"][1] + x_centers[-1] = (x_grid[-1] + x_grid[0]) / 2 + y_centers[-1] = (y_grid[-1] + y_grid[0]) / 2 + x_mins, x_maxs = x_grid[:-1], x_grid[1:] + y_mins, y_maxs = y_grid[:-1], y_grid[1:] + + bin_size_pixels = int(cs_bin_size[0] * Nx / x_grid[-1]) + + # area of a single cell + cell_areas = np.multiply.outer(x_maxs - x_mins, y_maxs - y_mins) + + # initialize S + S = np.zeros((N_cs_bins, Nx * Ny)) + + print("Generating S...") + for ibin in range(N_cs_bins): + bin_x_min = x_centers[ibin] - cs_bin_size[0] / 2 + bin_x_max = x_centers[ibin] + cs_bin_size[0] / 2 + bin_y_min = y_centers[ibin] - cs_bin_size[1] / 2 + bin_y_max = y_centers[ibin] + cs_bin_size[1] / 2 + + # calculate overlap + overlap_x = np.maximum( + 0, + np.minimum(bin_x_max, x_maxs[:, None]) + - np.maximum(bin_x_min, x_mins[:, None]), + ) + overlap_y = np.maximum( + 0, + np.minimum(bin_y_max, y_maxs[None, :]) + - np.maximum(bin_y_min, y_mins[None, :]), + ) + + # calculate fractional overlap + overlap = (overlap_x * overlap_y) / cell_areas + S[ibin] = overlap.flatten() + + for i in range(len(S[-1])): + S[-1][i] = 1 + + elif N_dims == 3: + x_grid = np.linspace(0.0, problem_lims[0], Nx + 1) + y_grid = np.linspace(0.0, problem_lims[1], Ny + 1) + z_grid = np.linspace(0.0, problem_lims[2], Nz + 1) + N_cs_bins = f["tallies"]["cs_tally_0"]["N_cs_bins"][()] + cs_bin_size = f["tallies"]["cs_tally_0"]["cs_bin_size"] + x_centers = f["tallies"]["cs_tally_0"]["center_points"][0] + y_centers = f["tallies"]["cs_tally_0"]["center_points"][1] + z_centers = f["tallies"]["cs_tally_0"]["center_points"][2] + x_centers[-1] = (x_grid[-1] + x_grid[0]) / 2 + y_centers[-1] = (y_grid[-1] + y_grid[0]) / 2 + z_centers[-1] = (z_grid[-1] + z_grid[0]) / 2 + x_mins, x_maxs = x_grid[:-1], x_grid[1:] + y_mins, y_maxs = y_grid[:-1], y_grid[1:] + z_mins, z_maxs = z_grid[:-1], z_grid[1:] + + bin_size_pixels = int(cs_bin_size[0] * Nx / x_grid[-1]) + + # area of a single cell + cell_areas = np.multiply.outer( + np.multiply.outer(x_maxs - x_mins, y_maxs - y_mins), z_maxs - z_mins + ) + + # initialize S + S = np.zeros((N_cs_bins, Nx * Ny * Nz)) + + print("Generating S...") + for ibin in range(N_cs_bins): + bin_x_min = 
x_centers[ibin] - cs_bin_size[0] / 2 + bin_x_max = x_centers[ibin] + cs_bin_size[0] / 2 + bin_y_min = y_centers[ibin] - cs_bin_size[1] / 2 + bin_y_max = y_centers[ibin] + cs_bin_size[1] / 2 + bin_z_min = z_centers[ibin] - cs_bin_size[2] / 2 + bin_z_max = z_centers[ibin] + cs_bin_size[2] / 2 + + # calculate overlap + overlap_x = np.maximum( + 0, + np.minimum(bin_x_max, x_maxs[:, None, None]) + - np.maximum(bin_x_min, x_mins[:, None, None]), + ) + overlap_y = np.maximum( + 0, + np.minimum(bin_y_max, y_maxs[None, :, None]) + - np.maximum(bin_y_min, y_mins[None, :, None]), + ) + overlap_z = np.maximum( + 0, + np.minimum(bin_z_max, z_maxs[None, None, :]) + - np.maximum(bin_z_min, z_mins[None, None, :]), + ) + + # calculate fractional overlap + overlap = (overlap_x * overlap_y * overlap_z) / cell_areas + S[ibin] = overlap.flatten() + + for i in range(len(S[-1])): + S[-1][i] = 1 + + return S, N_dims, bin_size_pixels + + +def cs_reconstruct(S, b, lambda_, Nx, Ny, Nz=1): + """ + Reconstructs a solution using compressed sensing and Basis Pursuit Denoising (BPDN). + + Args: + S (np.ndarray): Sampling matrix of shape (M, N), where M is the number of measurements + and N is the number of fine cells (Nx * Ny * Nz). + Typically generated using `construct_cs_sampling_matrix()`. + b (np.ndarray): Measurement vector of shape (M,). Represents measurements from the system. + lambda_ (float): Regularization parameter controlling sparsity in the solution. + Nx (int): Number of fine-mesh cells to be reconstructed, in the x-direction. + Ny (int): Number of fine-mesh cells to be reconstructed, in the y-direction. + Nz (int, optional): Number of fine-mesh cells to be reconstructed, in the z-direction (default is 1, for 2D problems). + + Returns: + np.ndarray: The reconstructed field, reshaped to (Nx, Ny) if 2D, or (Nx, Ny, Nz) if 3D. 
+ """ + + print(f"Reconstructing with lambda = {lambda_}", end="\r") + start_time = time.time() + + N_fine_cells = Nx * Ny * Nz + A = (spfft.dct(S.T, type=2, norm="ortho", axis=0)).T + + vx = cp.Variable(N_fine_cells) + objective = cp.Minimize( + 0.5 * cp.norm(A @ vx - b, 2) ** 2 + lambda_ * cp.norm(vx, 1) + ) + constraints = [(A @ vx)[-1] == b[-1]] + prob = cp.Problem(objective, constraints) + result = prob.solve(verbose=False) + + sparse_solution = np.array(vx.value).squeeze() + result = spfft.idct(sparse_solution, type=2, norm="ortho", axis=0) + if Nz == 1: + recon = result.reshape(Nx, Ny) + else: + recon = result.reshape(Nx, Ny, Nz) + print( + f"Reconstructing with lambda = {lambda_}, time = {np.round(time.time() - start_time, 4)}" + ) + return recon + # return recon, time.time() - start_time + + def generate_hdf5(data, mcdc): if mcdc["technique"]["domain_decomposition"]: @@ -2082,6 +2211,7 @@ def generate_hdf5(data, mcdc): if mcdc["technique"]["iQMC"]: break N_cs_bins = tally["filter"]["N_cs_bins"] + cs_bin_size = tally["filter"]["cs_bin_size"] # Shape N_score = tally["N_score"] @@ -2112,14 +2242,17 @@ def generate_hdf5(data, mcdc): group_name = "tallies/cs_tally_%i/%s/" % (ID, score_name) center_points = tally["filter"]["cs_centers"] - S = tally["filter"]["cs_S"] - reconstruction = tally["filter"]["cs_reconstruction"] f.create_dataset( "tallies/cs_tally_%i/center_points" % (ID), data=center_points ) - f.create_dataset("tallies/cs_tally_%i/S" % (ID), data=S) - f.create_dataset(group_name + "reconstruction", data=reconstruction) + + f.create_dataset( + "tallies/cs_tally_%i/N_cs_bins" % (ID), data=N_cs_bins + ) + f.create_dataset( + "tallies/cs_tally_%i/cs_bin_size" % (ID), data=cs_bin_size + ) mean = score_tally_bin[TALLY_SUM] sdev = score_tally_bin[TALLY_SUM_SQ] diff --git a/mcdc/tally.py b/mcdc/tally.py index 4cb6d9b9e..54b9af1a0 100644 --- a/mcdc/tally.py +++ b/mcdc/tally.py @@ -247,8 +247,8 @@ def cell_tally( def cs_tally( - N_cs_bins=10, - cs_bin_size=([1.0, 1.0]), + N_cs_bins=100, + cs_bin_size=([1.0, 1.0, 1.0]), x=np.array([-INF, INF]), y=np.array([-INF, INF]), z=np.array([-INF, INF]), @@ -265,16 +265,19 @@ def cs_tally( # Set ID card.ID = len(global_.input_deck.cs_tallies) + # Set bin properties + card.N_cs_bins = N_cs_bins + + if len(cs_bin_size) == 2: + card.cs_bin_size = [cs_bin_size[0], cs_bin_size[1], INF] + else: + card.cs_bin_size = cs_bin_size + # Set mesh card.x = x card.y = y card.z = z - # Set bin properties, convert bin size to problem units - card.N_cs_bins = N_cs_bins - card.cs_bin_size[0] = cs_bin_size[0] / (len(x) - 1) * (x[-1] - x[0]) - card.cs_bin_size[1] = cs_bin_size[1] / (len(y) - 1) * (y[-1] - y[0]) - # Set other filters card.t = t card.mu = mu @@ -293,11 +296,12 @@ def cs_tally( Nx = len(card.x) - 1 Ny = len(card.y) - 1 Nz = len(card.z) - 1 - Nt = len(card.t) - 1 - Nmu = len(card.mu) - 1 - N_azi = len(card.azi) - 1 - Ng = len(card.g) - 1 - card.N_bin = Nx * Ny * Nz * Nt * Nmu * N_azi * Ng + card.N_bin = Nx * Ny * Nz + + if Nx == 1 and Ny == 1 and Nz == 1: + raise Exception( + "Must supply a mesh grid for the cs_tally bins to be placed upon." 
diff --git a/mcdc/type_.py b/mcdc/type_.py
index d005d057b..bc1f95cfc 100644
--- a/mcdc/type_.py
+++ b/mcdc/type_.py
@@ -930,20 +930,20 @@
     Nmax_x = 2
     Nmax_y = 2
     Nmax_z = 2
-    Nmax_t = 2
-    Nmax_mu = 2
-    Nmax_azi = 2
-    Nmax_g = 2
+    # Nmax_t = 2
+    # Nmax_mu = 2
+    # Nmax_azi = 2
+    # Nmax_g = 2
     Nmax_score = 1
     N_cs_centers = 1
     for card in input_deck.cs_tallies:
         Nmax_x = max(Nmax_x, len(card.x))
         Nmax_y = max(Nmax_y, len(card.y))
         Nmax_z = max(Nmax_z, len(card.z))
-        Nmax_t = max(Nmax_t, len(card.t))
-        Nmax_mu = max(Nmax_mu, len(card.mu))
-        Nmax_azi = max(Nmax_azi, len(card.azi))
-        Nmax_g = max(Nmax_g, len(card.g))
+        # Nmax_t = max(Nmax_t, len(card.t))
+        # Nmax_mu = max(Nmax_mu, len(card.mu))
+        # Nmax_azi = max(Nmax_azi, len(card.azi))
+        # Nmax_g = max(Nmax_g, len(card.g))
         Nmax_score = max(Nmax_score, len(card.scores))
         N_cs_centers = card.N_cs_bins[0]
 
@@ -954,24 +954,25 @@
     # Set the filter
     filter_ = [
         ("N_cs_bins", int),
-        ("cs_bin_size", float64, (2,)),
+        ("cs_bin_size", float64, (3,)),
         (
             "cs_centers",
             float64,
             (
-                2,
+                3,
                 N_cs_centers,
             ),
         ),
-        ("cs_S", float64, (N_cs_centers, (Nmax_x - 1) * (Nmax_y - 1))),
-        ("cs_reconstruction", float64, ((Nmax_y - 1), (Nmax_x - 1))),
         ("x", float64, (Nmax_x,)),
         ("y", float64, (Nmax_y,)),
         ("z", float64, (Nmax_z,)),
-        ("t", float64, (Nmax_t,)),
-        ("mu", float64, (Nmax_mu,)),
-        ("azi", float64, (Nmax_azi,)),
-        ("g", float64, (Nmax_g,)),
+        ("Nx", int64),
+        ("Ny", int64),
+        ("Nz", int64),
+        # ("t", float64, (Nmax_t,)),
+        # ("mu", float64, (Nmax_mu,)),
+        # ("azi", float64, (Nmax_azi,)),
+        # ("g", float64, (Nmax_g,)),
     ]
     struct += [("filter", filter_)]
 
@@ -979,25 +980,20 @@
     # Tally strides
     stride = [
         ("tally", int64),
-        ("sensitivity", int64),
-        ("mu", int64),
-        ("azi", int64),
-        ("g", int64),
-        ("t", int64),
+        # ("sensitivity", int64),
+        # ("mu", int64),
+        # ("azi", int64),
+        # ("g", int64),
+        # ("t", int64),
         ("x", int64),
         ("y", int64),
         ("z", int64),
-        # ("N_cs_bins", int64), # TODO: get rid of this line?
     ]
     struct += [("stride", stride)]
 
-    # Total number of bins (will be used for the reconstruction)
-    # TODO: Might be able to get rid of this (just get N_bin from the mesh)
+    # Total number of mesh bins (not cs bins)
     struct += [("N_bin", int64)]
 
-    # Number of compressed sensing bins
-    # struct += [("N_cs_bins", int64)]
-
     # Scores
     struct += [("N_score", int64), ("scores", int64, (Nmax_score,))]
diff --git a/pyproject.toml b/pyproject.toml
index 335257fda..b065fd946 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -59,10 +59,10 @@
 dependencies = [
     "black",
     "sympy",
     "pre-commit",
-    "cvxpy"
 ]
 
 [project.optional-dependencies]
 docs = ["sphinx==7.2.6", "furo", "sphinx_toolbox"]
+optimization = ["cvxpy"]
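With cvxpy moved from the core dependencies to the new optimization extra, the post-processing scripts can get it either directly (pip install cvxpy) or through the extra, assuming the package's published name:

    pip install "mcdc[optimization]"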