diff --git a/chemtools/__init__.py b/chemtools/__init__.py
index fa23ccdc..1dca91bf 100644
--- a/chemtools/__init__.py
+++ b/chemtools/__init__.py
@@ -30,6 +30,7 @@
from chemtools.denstools import *
from chemtools.utils import *
from chemtools.outputs import *
+from chemtools.topology import *
import horton
diff --git a/chemtools/topology/__init__.py b/chemtools/topology/__init__.py
index 96e4a896..49c5f556 100644
--- a/chemtools/topology/__init__.py
+++ b/chemtools/topology/__init__.py
@@ -23,4 +23,6 @@
"""The Scalar Field Topological Analysis Module."""
-from chemtools.topology import *
+from chemtools.topology.point import *
+from chemtools.topology.critical import *
+from chemtools.topology.qtaim import *
diff --git a/chemtools/topology/critical.py b/chemtools/topology/critical.py
index 96a54ec4..661eacb9 100644
--- a/chemtools/topology/critical.py
+++ b/chemtools/topology/critical.py
@@ -147,9 +147,12 @@ def find_critical_points(self):
if abs(dens) < 1.e-4 and np.all(abs(grad) < 1.e-4):
continue
# compute rank & signature of critical point
- eigenvals, eigenvecs = np.linalg.eigh(self.hess(coord))
- cp = CriticalPoint(coord, eigenvals, eigenvecs, 1e-4)
- self._cps.setdefault((cp.rank[0], cp.signature[0]), []).append(cp)
+ try:
+ eigenvals, eigenvecs = np.linalg.eigh(self.hess(coord))
+ cp = CriticalPoint(coord, eigenvals, eigenvecs, 1e-4)
+ self._cps.setdefault((cp.rank[0], cp.signature[0]), []).append(cp)
+ except np.linalg.LinAlgError:
+ pass
# check Poincare–Hopf equation
if not self.poincare_hopf_equation:
warnings.warn("Poincare–Hopf equation is not satisfied.", RuntimeWarning)
diff --git a/chemtools/topology/ode.py b/chemtools/topology/ode.py
new file mode 100644
index 00000000..82321ed3
--- /dev/null
+++ b/chemtools/topology/ode.py
@@ -0,0 +1,533 @@
+# -*- coding: utf-8 -*-
+# ChemTools is a collection of interpretive chemical tools for
+# analyzing outputs of the quantum chemistry calculations.
+#
+# Copyright (C) 2016-2019 The ChemTools Development Team
+#
+# This file is part of ChemTools.
+#
+# ChemTools is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# ChemTools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see
+#
+# --
+import numpy as np
+from scipy.spatial.distance import cdist
+
+from scipy.integrate import solve_ivp
+
+__all__ = [
+ "find_basins_steepest_ascent_rk45",
+ "gradient_path",
+ "NonNuclearAttractionException"
+]
+
+
+class NonNuclearAttractionException(Exception):
+ """
+ Exception to handle non-nuclear attractors.
+
+    This is a simple hack in order to get NNA to work without changing much of the code-base.
+ """
+
+ def __init__(self, nna_coordinate):
+ self.nna_coordinate = nna_coordinate
+
+
+def delete_duplicate_pts(pts, eps):
+    dists = cdist(pts, pts)  # pairwise distances between all points
+    dists[np.isclose(dists, 0)] = np.inf  # mask self-distances on the diagonal
+    i, j = np.where(dists <= eps)  # symmetric index pairs closer than eps
+    indices_to_delete = np.unique(j[i < j])  # keep the lower index of each close pair; works per-cluster
+    return np.delete(pts, indices_to_delete, axis=0), indices_to_delete
+
+
+def _get_normalized_gradient_func(grad_func):
+ r"""Returns the normalized version of the function."""
+ def norm_grad_func(x):
+ grad = grad_func(x)
+ return grad / np.linalg.norm(grad, axis=1)[:, None]
+ # norm_grad = np.linalg.norm(grad, axis=1)
+ # grad = grad / norm_grad[:, None]
+ # grad[np.abs(norm_grad) < 1e-45, :] = 0.0
+ # return grad
+ return norm_grad_func
+
+
+def _RK45_step(pts, grad_func, step_size, grad0=None):
+    r"""
+    Runge-Kutta fourth and fifth-order (RKF45) step for the following ode system:
+
+    .. math::
+        \frac{d(r(s))}{ds} = \nabla \rho (r(s)),
+
+    where :math:`\nabla \rho(x)` is the gradient of a function.
+
+    Parameters
+    ----------
+    pts : ndarray(M, 3)
+        Points to take a step in.
+    grad_func: callable(ndarray(M,3), ndarray(M,3))
+        Callable function that takes in points and obtains the gradient.
+    step_size: float
+        Stepsize for the step
+    grad0: ndarray(M, 3)
+        If the gradient is already computed on `pts`, then avoids re-computing it.
+
+    Returns
+    -------
+    (ndarray(M,3), ndarray(M,3)):
+        Returns fourth-order and fifth-order Runge-Kutta step, respectively.
+
+    """
+    # Fourth and Fifth-Order Runge-Kutta (Fehlberg tableau)
+    if grad0 is None:
+        k1 = step_size * grad_func(pts)
+    else:
+        k1 = step_size * grad0
+    k2 = step_size * grad_func(pts + 0.25 * k1)  # Fehlberg node a2 = 1/4
+    k3 = step_size * grad_func(pts + (3.0 / 32) * k1 + (9.0 / 32.0) * k2)
+    k4 = step_size * grad_func(pts + (1932 / 2197) * k1 - (7200 / 2197) * k2 + (7296 / 2197) * k3)
+    k5 = step_size * grad_func(
+        pts + (439 / 216) * k1 - 8.0 * k2 + (3680 / 513) * k3 - (845 / 4104) * k4
+    )
+    k6 = step_size * grad_func(
+        pts
+        - (8.0 / 27.0) * k1
+        + 2.0 * k2
+        - (3544 / 2565) * k3
+        + (1859 / 4104) * k4
+        - (11 / 40) * k5
+    )
+
+    # Get the fourth and fifth-order approximation (b4 = 2197/4104 in Fehlberg's tableau)
+    y_four = pts + (25.0 / 216.0) * k1 + (1408 / 2565) * k3 + (2197 / 4104) * k4 - k5 / 5.0
+    y_five = (
+        pts +
+        (16.0 / 135.0) * k1
+        + (6656 / 12825) * k3
+        + (28561 / 56430) * k4
+        - (9.0 / 50.0) * k5
+        + (2.0 / 55.0) * k6
+    )
+    return y_four, y_five
+
+
+def find_basins_steepest_ascent_rk45(
+ initial_pts,
+ dens_func,
+ grad_func,
+ beta_spheres,
+ maximas,
+ ss_0=1e-7,
+ tol=1e-7,
+ max_ss=0.25,
+ maxiter=5000,
+ iter_nna=100,
+ hess_func=None,
+ terminate_if_other_basin_found=False,
+ check_for_nna=False
+):
+ r"""
+ Solves the following problem ODE using Runge-Kutta of order 4(5) with adaptive step-size
+
+ .. math::
+        \frac{d(r(t))}{dt} = \frac{\nabla \rho(r(t))}{||\nabla \rho(r(t))||}
+
+ over a set of points.
+
+ Parameters
+ ----------
+ initial_pts: ndarray(N, 3)
+ Initial points to solve for steepest-ascent/backtracing.
+ dens_func: callable(ndarray(N,3), ndarray(N))
+ The electron density function.
+ grad_func: callable(ndarray(N,3), ndarray(N,3))
+ The gradient of the electron density.
+ beta_spheres: ndarray(M,)
+ The beta-sphere/trust-region radius of each atom. These are spheres
+ centered at each maxima that reduces the convergence of each point.
+ maximas: ndarray(M,3)
+ The position of each atoms in the molecule.
+ ss_0: float, optional
+ The initial step-size of the ODE (RK45) solver.
+ max_ss: float, optional
+ Maximum step-size of the ODE (RK45) solver.
+ tol: float, optional
+ Tolerance for the adaptive step-size.
+ maxiter: int, optional
+ The maximum number of iterations of taking a step in the ODE solver.
+ iter_nna: int, optional
+ Iteration to start checking for non-nuclear attractors and for the step-size to be reduced by 10.
+ hess_func: callable(ndarray(N, 3)->ndarray(N, 3, 3)), optional
+ The Hessian of the electron density. If this is provided, then non-nuclear attractors will be found.
+        Adds a default Lebedev/angular grid of degree fifty and a 0.01 a.u. radius to the `beta-spheres` if it is
+ provided.
+ terminate_if_other_basin_found : bool
+ If true, then if multiple basin values were found, then the ODE solver will exit.
+ If false, then the ODE solver will run until all points enter one of the
+ beta-sphere/trust-region.
+ check_for_nna: bool
+ If true, then it checks for Non-nuclear attractor. Requires `hess_func` to be provided.
+
+ Returns
+ -------
+ ndarray(N,), ndarray(M, 3):
+ - Integer array that assigns each point to a basin/maxima/atom of the molecule. If basin value is -2, then
+ the point converges to a BCP or RCP.
+ If value is negative one, then the point wasn't assigned to a basin.
+ - Array of 3D coordinates of the maximas. New potential maximas are found and updated here.
+ This is only returned if `hess_func` is provided.
+
+ Notes
+ -----
+ - Setting tolerance to 1e-10 is equivalent to rtol=1e-4, atol=1e-7 for method="RK45" in scipy.integrate.solve_ivp.
+ - Setting tolerance to 1e-9 is equivalent to rtol=1e-3, atol=1e-6 for method="RK45" in scipy.integrate.solve_ivp.
+
+ """
+ norm_grad_func = _get_normalized_gradient_func(grad_func)
+
+ numb_pts = initial_pts.shape[0]
+ if isinstance(ss_0, float):
+ ss = np.ones((numb_pts, 1)) * ss_0
+ elif isinstance(ss_0, np.ndarray):
+ if not ss_0.shape[1] == 1:
+ raise ValueError(f"Steps-size {ss_0.shape} should have shape of the form (N, 1).")
+ ss = ss_0.copy()
+ else:
+ raise TypeError(f"Step-size ss_0 {type(ss_0)} should have type float or array.")
+
+ pts = initial_pts.copy()
+ dens_vals0 = dens_func(initial_pts)
+ # print("Intial density values ", dens_vals0)
+
+ assigned_basins = (-1) * np.ones((numb_pts,), dtype=int)
+ not_found_indices = np.arange(numb_pts)
+ first_basin = None # First basin value that was found
+ niter = 0 # Number of iterations
+ grad0 = norm_grad_func(pts) # Avoids re-computing the gradient twice, used to check for NNA
+ print("START STEEPEST-ASCENT")
+ import time
+ while len(not_found_indices) != 0:
+ if niter == maxiter:
+ raise RuntimeError(
+ f"Number of iterations reached maximum {niter}, "
+ f"this may be because of a non-nuclear attractor (NNA) which may cause the ODE "
+ f"to cycle between two points. Repeat this calculation by including the "
+ f"non-nuclear attractor to the list of critical points."
+ )
+
+ start = time.time()
+ y_four, y_five = _RK45_step(pts, norm_grad_func, ss, grad0)
+ final = time.time()
+ if niter == 0:
+ print(f"Number Iterations {niter} RK Step {final - start} and number of points left {len(not_found_indices)}")
+
+ # Update step-size
+ # print("Step size used", ss)
+ ss = (tol * ss / (2.0 * np.linalg.norm(y_five - y_four, axis=1)[:, None]))**0.25
+ ss[ss > max_ss] = max_ss
+
+ # Get density values and if the density-values decreased, reduce step-size
+ dens_vals1 = dens_func(y_five)
+ indices = np.where(dens_vals1 <= dens_vals0)[0]
+ # print("New density values ", dens_vals1)
+ # print("Indices that density decreaed ", indices)#, dens_vals1[indices], dens_vals0[indices])
+ if len(indices) != 0:
+ # print("Density Decreased")
+ # print("Gradients here", grad_func(pts[indices, :]))
+ y_five[indices, :] = pts[indices, :]
+ ss[indices] *= 0.25
+ # TODO: Check here if the density is equal to the isosurface value, then stop.
+
+ # Check any points are within the beta-spheres and remove them if they converged.
+ dist_maxima = cdist(y_five, maximas)
+ # print("Distance maxima", dist_maxima)
+ beta_sph = dist_maxima <= beta_spheres
+ which_basins = np.where(beta_sph)
+ # print("beta_sphereS ", beta_spheres)
+ # print("which pts are within basin based on beta-sphere", which_basins)
+ # print("basins", which_basins[1])
+ if len(which_basins[0]) != 0:
+ assigned_basins[not_found_indices[which_basins[0]]] = which_basins[1]
+ not_found_indices = np.delete(not_found_indices, which_basins[0])
+ y_five = np.delete(y_five, which_basins[0], axis=0)
+ ss = np.delete(ss, which_basins[0])[:, None]
+ dens_vals1 = np.delete(dens_vals1, which_basins[0])
+
+ # Terminate early if multiple basins were found
+ if terminate_if_other_basin_found:
+ unique_basins = np.unique(which_basins[1])
+ # If the number of basins is greater than one then exit
+ if len(unique_basins) > 1:
+ return assigned_basins
+ else:
+ # If a new basin is found then exit
+ if first_basin is None:
+ first_basin = unique_basins[0]
+ elif first_basin != unique_basins[0]:
+ return assigned_basins
+ # print("New indices to still do: ", not_found_indices)
+
+ # For the points that didn't converge, check for non-nuclear attractors (NNA)
+ if len(y_five) != 0:
+ grad_vals = grad_func(y_five)
+ grad0 = grad_vals / np.linalg.norm(grad_vals, axis=1)[:, None]
+ # Check maxiter is relatively large, to avoid computing the Hessian multiple times.
+ if niter >= maxiter - iter_nna:
+ i_smallg = np.where(
+ (np.all(np.abs(grad_vals) < 1e-5, axis=1)) & (dens_vals1 > 0.001)
+ )[0]
+ # Decrease the step-size because the gradient doesn't get small enough
+ ss /= 10
+ if len(i_smallg) != 0:
+ # if Hessian is provided, then check for NNA
+ if hess_func is not None:
+ hess = hess_func(y_five[i_smallg])
+ eigs = np.linalg.eigvalsh(hess)
+ print(eigs)
+ # Check if local maxima:
+ which_is_nna = np.where(np.all(eigs < -1e-10, axis=1))[0]
+ if check_for_nna and len(which_is_nna) != 0:
+ print("Found NNA")
+ nna_indices = i_smallg[which_is_nna]
+ # Found a NNA, Remove Duplicates, Update maxima and beta-spheres
+ new_maximas, indices_to_delete = delete_duplicate_pts(y_five[nna_indices], 1e-3)
+
+ maximas = np.vstack((maximas, new_maximas))
+ beta_spheres = np.append(beta_spheres, [0.01] * len(new_maximas))
+
+ assigned_basins[not_found_indices[nna_indices]] = len(maximas) - 1
+ not_found_indices = np.delete(not_found_indices, nna_indices)
+ y_five = np.delete(y_five, nna_indices, axis=0)
+ ss = np.delete(ss, nna_indices)[:, None]
+ dens_vals1 = np.delete(dens_vals1, nna_indices)
+ grad0 = np.delete(grad0, nna_indices, axis=0)
+
+                    # Check if it converged to a BCP or RCP, then it is precisely on the surface!
+                    numb_neg = np.sum((eigs < -1e-10).astype(int), axis=1)
+                    which_bcp = np.where(numb_neg == 1)[0]
+                    which_rcp = np.where(numb_neg == 2)[0]
+                    if len(which_bcp) != 0 or len(which_rcp) != 0:
+                        saddle_indices = i_smallg[np.concatenate((which_bcp, which_rcp))]
+
+                        # Mark the BCP/RCP (saddle) points with basin -2 and remove them.
+                        assigned_basins[not_found_indices[saddle_indices]] = -2
+                        not_found_indices = np.delete(not_found_indices, saddle_indices)
+                        y_five = np.delete(y_five, saddle_indices, axis=0)
+                        ss = np.delete(ss, saddle_indices)[:, None]
+                        dens_vals1 = np.delete(dens_vals1, saddle_indices)
+                        grad0 = np.delete(grad0, saddle_indices, axis=0)
+
+                        raise RuntimeError(f"Found BCP and RCP points. Handle this later. \n")
+
+ elif not check_for_nna:
+ # Assign these points to basin -2, delete them.
+ print(f"Maximas {maximas}")
+ print(f"Where the NNCP is {y_five[i_smallg]}")
+ print(f"Gradient {grad_vals[i_smallg]}")
+ print(f"Density {dens_vals1[i_smallg]}")
+ print(f"Distance to each maxima {cdist(y_five[i_smallg], maximas)}")
+ raise NonNuclearAttractionException(y_five[i_smallg])
+
+ # Update next iteration
+ pts = y_five.copy()
+ dens_vals0 = dens_vals1
+ niter += 1
+
+ # input("Next step")
+ # print("Final basins ", assigned_basins)
+ return assigned_basins, maximas
+
+
+def steepest_ascent_rk45(
+ initial_pts, dens_func, grad_func, ss_0=1e-7, tol=1e-7, max_ss=0.25, tol_conv=1e-10, maxiter=2000
+):
+ r"""
+ Solves the following problem ODE using Runge-Kutta of order 4(5) with adaptive step-size
+
+ .. math::
+        \frac{d(r(t))}{dt} = \frac{\nabla \rho(r(t))}{||\nabla \rho(r(t))||}
+
+ over a set of points.
+
+ Parameters
+ ----------
+ initial_pts: ndarray(N, 3)
+ Initial points to solve for steepest-ascent/backtracing.
+ dens_func: callable(ndarray(N,3), ndarray(N))
+ The electron density function.
+ grad_func: callable(ndarray(N,3), ndarray(N,3))
+ The gradient of the electron density.
+ beta_spheres: ndarray(M,)
+ The beta-sphere/trust-region radius of each atom. These are spheres
+ centered at each maxima that reduces the convergence of each point.
+ maximas: ndarray(M,3)
+ The position of each atoms in the molecule.
+ ss_0: float, optional
+ The initial step-size of the ODE (RK45) solver.
+ max_ss: float, optional
+ Maximum step-size of the ODE (RK45) solver.
+ tol_conv: float, optional
+ Tolerance for the convergence of a point from a ODE step.
+ tol: float, optional
+ Tolerance for the adaptive step-size.
+ maxiter: int, optional
+ The maximum number of iterations of taking a step in the ODE solver.
+ terminate_if_other_basin_found : bool
+ If true, then if multiple basin values were found, then the ODE solver will exit.
+ If false, then the ODE solver will run until all points enter one of the
+ beta-sphere/trust-region.
+
+ Returns
+ -------
+ ndarray(N,):
+ Integer array that assigns each point to a basin/maxima/atom of the molecule.
+ If value is negative one, then the point wasn't assigned to a basin.
+
+ """
+ norm_grad_func = _get_normalized_gradient_func(grad_func)
+
+ numb_pts = initial_pts.shape[0]
+ if isinstance(ss_0, float):
+ ss = np.ones((numb_pts, 1)) * ss_0
+ elif isinstance(ss_0, np.ndarray):
+ if not ss_0.shape[1] == 1:
+ raise ValueError(f"Steps-size {ss_0.shape} should have shape of the form (N, 1).")
+ ss = ss_0.copy()
+ else:
+ raise TypeError(f"Step-size ss_0 {type(ss_0)} should have type float or array.")
+
+ pts = initial_pts.copy()
+ dens_vals0 = dens_func(initial_pts)
+
+ not_found_indices = np.arange(numb_pts)
+ niter = 0 # Number of iterations
+ grad0 = norm_grad_func(pts) # Avoids re-computing the gradient twice, used to check for NNA
+ pts_curr = pts.copy()
+ final_pts = []
+ while len(not_found_indices) != 0:
+ if niter == maxiter:
+ raise RuntimeError(
+ f"Number of iterations reached maximum {niter}, "
+ f"this may be because of a non-nuclear attractor (NNA) which may cause the ODE "
+ f"to cycle between two points. Repeat this calculation by including the "
+ f"non-nuclear attractor to the list of critical points."
+ )
+ y_four, y_five = _RK45_step(pts_curr, norm_grad_func, ss, grad0)
+
+ # Update step-size
+ # print("Step size used", ss)
+ ss = (tol * ss / (2.0 * np.linalg.norm(y_five - y_four, axis=1)[:, None])) ** 0.25
+ ss[ss > max_ss] = max_ss
+
+ # Get density values and if the density-values decreased, reduce step-size
+ dens_vals1 = dens_func(y_five)
+ indices = np.where(dens_vals1 <= dens_vals0)[0]
+ # print("New density values ", dens_vals1)
+ # print("Indices that density decreaed ", indices)#, dens_vals1[indices], dens_vals0[indices])
+ if len(indices) != 0:
+ # print("Density Decreased")
+ # print("Gradients here", grad_func(pts[indices, :]))
+ y_five[indices, :] = pts_curr[indices, :]
+ ss[indices] *= 0.25
+
+ # Check any points are within the beta-spheres and remove them if they converged.
+ converged = np.where(np.all(np.abs(y_five - pts_curr) < tol_conv, axis=1))[0]
+ if len(converged) != 0:
+ not_found_indices = np.delete(not_found_indices, converged)
+ final_pts.append(y_five[converged])
+ y_five = np.delete(y_five, converged, axis=0)
+ ss = np.delete(ss, converged)[:, None]
+ dens_vals1 = np.delete(dens_vals1, converged)
+
+ # Update next iteration
+ grad0 = norm_grad_func(y_five)
+ pts_curr = y_five.copy()
+ dens_vals0 = dens_vals1
+ niter += 1
+
+ # input("Next step")
+ # print("Final basins ", assigned_basins)
+ return np.vstack(final_pts)
+
+
+def gradient_path(pt, grad_func, use_norm=False, maximas=None, t_span=(0, 1000), method="LSODA", max_step=100,
+ t_inc=400, max_tries=10, first_step=1e-3, beta_spheres=-np.inf):
+ r"""
+ Solves the final point of steepest-ascent starting at a single pt.
+
+ If maximas is not provided, then termination occurs due to convergence.
+
+ Parameters
+ ----------
+ pt : ndarray(3,)
+ The initial point of the ODE.
+ grad_func : callable(ndarray(N, 3) -> ndarray(M, 3))
+ The gradient function for the steepest-ascent
+ use_norm: bool
+ If true, then the grad_func output is normalized. This isn't useful for optimizing
+ the centers to obtain the local maximas.
+ maximas: (ndarray(M, 3), None)
+        The maximas of the density. If it isn't provided then termination occurs due to
+ convergence between two consequent points of the ODE.
+ t_span: (float, float)
+ The lower-bound and upper-bound of solving the ODE time-step.
+
+ Returns
+ -------
+ ndarray(3,)
+ The final point of the steepest-ascent path.
+
+ """
+ is_converged = False
+ y0 = pt.copy()
+ numb_times = 0
+
+ norm_grad_func = grad_func
+ if use_norm:
+ norm_grad_func = _get_normalized_gradient_func(grad_func)
+
+ def grad(t, x):
+ return norm_grad_func(np.array([x]))[0]
+
+ while not is_converged and numb_times < max_tries:
+ sol = solve_ivp(
+ grad,
+ y0=y0,
+ t_span=t_span,
+ method=method,
+ max_step=max_step,
+ first_step=first_step,
+ )
+ # print(sol)
+ assert sol["success"], "ODE was not successful."
+ # If it is close to a maxima or within any of the beta-spheres, then stop.
+ if maximas is not None:
+ last_y_val = sol["y"][:, -1]
+ dist_maxima = np.linalg.norm(last_y_val - maximas, axis=1)
+ if np.any(dist_maxima < 0.1) or np.any(dist_maxima <= beta_spheres):
+ return sol["y"][:, -1]
+ # if maximas not specified, then just look at if it converged.
+ else:
+ convergence = np.linalg.norm(sol["y"][:, -2] - sol["y"][:, -1])
+ if convergence < 1e-1:
+ return sol["y"][:, -1]
+ # No convergence occured, so increaes t-span.
+ print(sol["y"][:, -1], t_span, "YE")
+ t_span = (t_span[1], t_span[1] + t_inc)
+ y0 = sol["y"][:, -1]
+ numb_times += 1
+
+ if numb_times == max_tries:
+ raise RuntimeError(f"No convergence in normalized_gradient path pt {pt},"
+ f" solution {sol['y'][:, -1]}, t_span {t_span}")
diff --git a/chemtools/topology/qtaim.py b/chemtools/topology/qtaim.py
new file mode 100644
index 00000000..a8068b8f
--- /dev/null
+++ b/chemtools/topology/qtaim.py
@@ -0,0 +1,1031 @@
+# -*- coding: utf-8 -*-
+# ChemTools is a collection of interpretive chemical tools for
+# analyzing outputs of the quantum chemistry calculations.
+#
+# Copyright (C) 2016-2019 The ChemTools Development Team
+#
+# This file is part of ChemTools.
+#
+# ChemTools is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# ChemTools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see
+#
+# --
+import warnings
+from collections import OrderedDict
+import itertools
+import numpy as np
+from scipy.spatial.distance import cdist
+from scipy.spatial.transform import Rotation as r
+
+from grid.angular import AngularGrid
+
+from chemtools.topology.surface import SurfaceQTAIM
+from chemtools.topology.utils import (
+ construct_radial_grids,
+ determine_beta_spheres_and_nna,
+ find_optimize_centers,
+ solve_for_oas_points,
+)
+from chemtools.topology.ode import find_basins_steepest_ascent_rk45
+
+
+__all__ = ["qtaim_surface_vectorize"]
+
+
+def _classify_rays_as_ias_or_oas(
+ maximas, maximas_to_do, all_points, all_basins, index_to_atom, numb_rays_to_atom, numb_rad_to_radial_shell
+):
+ r"""
+ Classify all rays in a molecule as either crossing the outer or inner atomic surface.
+
+ Also provides the interval limits [r_0, r_1] of each ray that crosses the IAS.
+
+ Parameters
+ ----------
+ maximas: ndarray(M, 3)
+ Optimized centers of the electron density where the rays need to be classified. Doesn't need to be
+ all of the centers.
+ all_points: ndarray(N, 3)
+ All points in all rays across each atom in a molecule.
+ all_basins: ndarray(N, 3)
+ All basin values that were assigned for each point in all rays
+ index_to_atom: list[int]
+ Gives the indices that assings each point in `all_points` to each atom, i.e.
+ [0, i_1, i_2, ..., N] implies `all_points[0: i_1]` corresponds to atom 1.
+ numb_rays_to_atom: list[int]
+ List of size `M`, that holds the number of angular points or rays in each atom.
+ Used to assign which points in `all_points` corresponds to which ray.
+ numb_rad_to_radial_shell: list[list[int]]
+ For each atom, for each angular pt/ray, tells the number of radial point.
+ Used to assign which points in `all_points` corresponds to which point in each ray.
+
+ Returns
+ -------
+ ias, oas, ias_bnds, ias_basins: list[list[int]], list[list[int]], list[OrderedDict], list[list[int]]
+ A list of size `M`, that holds a list of indices of which angular pt/ray corresponds to
+ either intersecting the ias or oas. The final element is a list of size `M`, of
+ ordered dictionary whose keys are the indices of ias and items are the lower
+ and upper-bound of the radius where the intersection occurs somewhere inbetween.
+ `ias_basins` contains which basin each ias pt in `ias` switches to, when it crosses the boundary.
+
+ These are repeat again for searching for second intersection ias_2, ias_basins_2, ias_bnds_2, and
+ similarly for the third intersection.
+
+ """
+ numb_maximas = len(maximas)
+ oas = [[] for _ in range(numb_maximas)] # outer atomic surface
+ ias = [[] for _ in range(numb_maximas)] # inner atomic surface for first intersection.
+
+ # The points that all converge to the same point are OAS, and are isosurface points. Remove
+ # them
+ # First to get the maxima, you would use index_to_atom.
+ # To get each ray, assuming the number of pts in each ray is the same, you would do
+ # use numb_rad_to_atom
+ # Initially each ray has the same number of points, then one can re-shape both
+ # points and basins to make it easy to index each ray of each maxima, then classify
+ # it as either a IAS or OAS. Here you can determine whether it crosses twice, and
+ # determine which ray requires special attention.
+ #
+ # print("Index to atom ", index_to_atom, all_points.shape)
+ ias_bnds = [OrderedDict() for _ in range(0, numb_maximas)] # Keys are Points index
+ ias_basins = [OrderedDict() for _ in range(numb_maximas)] # Keys are Points index
+
+
+ ias_2 = [[] for _ in range(numb_maximas)] # inner atomic surface for second intersection.
+ ias_3 = [[] for _ in range(numb_maximas)] # inner atomic surface for third intersection.
+ ias_bnds_2 = [OrderedDict() for _ in range(0, numb_maximas)] # Keys are Points index
+ ias_basins_2 = [OrderedDict() for _ in range(numb_maximas)] # Keys are Points index
+ ias_bnds_3 = [OrderedDict() for _ in range(0, numb_maximas)] # Keys are Points index
+ ias_basins_3 = [OrderedDict() for _ in range(numb_maximas)] # Keys are Points index
+ np.set_printoptions(threshold=np.inf)
+ for i_do, i_maxima in enumerate(maximas_to_do):
+ #print("ATom i ", i_maxima)
+ #print("Starting and Final index", index_to_atom[i_maxima], index_to_atom[i_maxima + 1])
+ basins_a = all_basins[index_to_atom[i_do]:index_to_atom[i_do + 1]] # Basin of atom
+ points_a = all_points[index_to_atom[i_do]:index_to_atom[i_do + 1]] # Points of atom
+ numb_rad_pts = numb_rad_to_radial_shell[i_do]
+
+ #print("Basins of ith atom ", basins_a)
+ #print(f"Number of radial points (Size M) {numb_rad_pts}, numb_rad_pts.shape {len(numb_rad_pts)} Sum of num_rad_pts {np.sum(numb_rad_pts)}")
+ #print("Number of angular points in this atom", numb_rays_to_atom[i_maxima])
+ i_ray = 0
+ for i_ang in range(numb_rays_to_atom[i_do]):
+ #print("Angular pt j", i_ang)
+ #print("Number of radial points in this angular pt ", numb_rad_pts[i_ang])
+
+ # Get the basin of the ray
+ basins_ray = basins_a[i_ray:i_ray + numb_rad_pts[i_ang]]
+ #print("Basin of the ray ", basins_ray)
+
+ # Classify basins as either OAS and IAS, if IAS, then count the number of
+ # intersections of the IAS. In addition, get the l_bnd, u_bnd of each intersection.
+ # Groups basins i.e. [1, 1, 1, 0, 0, 0, 1, 1, 2, 2] -> [1, 0, 1, 2]
+ # group_by = {(0, [0, 0, 0, 0, etc], (1, [1, 1, 1, etc)]}
+ group_by = [(k, list(g)) for k, g in itertools.groupby(basins_ray)]
+ unique_basins = np.array([x[0] for x in group_by])
+ #print(basins_ray == i_maxima)
+ #print(group_by)
+
+ # All pts in the ray got assigned to the same basin of the maxima
+ # or if the numb_rad_pts is zero, then this means that the ray has density value less than
+ # the isosurface value (starting from the beta-sphere determination), thus it is an oas point.
+ if (len(unique_basins) == 1 and unique_basins[0] == i_maxima) or numb_rad_pts[i_ang] == 0:
+ # This implies it is an OAS point, else then it is an IAS with a bad ray.
+ oas[i_maxima].append(i_ang)
+ else:
+ # The point is an IAS, determine the number of intersections.
+ conv_to_atom = unique_basins == i_maxima
+ numb_intersections = np.sum(conv_to_atom)
+ l_bnd_pad = 0.0 # This is the case with a bad ray, loweres the l_bnd by this amount
+ if numb_intersections == 0:
+ # This is IAS with a bad ray, would have to re-determine the l_bnd. This was an IAS point
+ # Whose ray at the boundary is assigned to different basins depending on the accuracy of ODE solver.
+ # This is IAS with a bad ray, would have to re-determine the l_bnd
+ l_bnd_pad = 0.1
+ if 0 <= numb_intersections:
+ # print("IAS With one Intersection.")
+ # Determine lower and upper-bound Point on ray.
+ if group_by[0][1][0] == i_maxima:
+ # if the ray started with a basin that converged ot i_maxima, then take the upper bound
+ # to be the when it started to switch to a different basin.
+ index_u_bnd = len(group_by[0][1])
+ index_l_bnd = index_u_bnd - 1
+ else:
+ # Here the ray is a bad ray (numb_intersections == 0) in the sense that the start of the ray
+                        # should have converged to i_maxima but it didn't, and so take the u_bnd to be when it for sure
+ # converges to to different maxima from i_maxima.
+ index_u_bnd = min(2, len(group_by[0][1]) - 1) # Subtract one so that i_ray + index_u_bnd <= length of ray
+ index_l_bnd = 0 # Take it to be zero since l_bnd_pad in this case is set to 0..
+ if index_u_bnd == index_l_bnd:
+ raise RuntimeError(f"Algorithm Error .")
+ # Determine radius from the upper and lower bound.
+ r_ubnd = np.linalg.norm(points_a[i_ray + index_u_bnd] - maximas[i_maxima])
+ r_lbnd = np.linalg.norm(points_a[i_ray + index_l_bnd] - maximas[i_maxima])
+ # Update containers
+ ias_bnds[i_maxima][i_ang] = [max(0.1, r_lbnd - l_bnd_pad), r_ubnd]
+ ias[i_maxima].append(i_ang)
+ # Get the basins where it switches
+ ias_basins[i_maxima][i_ang] = unique_basins[np.argmax(unique_basins != i_maxima)]
+ # print("Radius Lower and Upper bound ", r_lbnd, r_ubnd)
+
+ # Gather information about other intersections
+ if numb_intersections > 1:
+ # print("Angular pt j", i_ang)
+ # print("Number of radial points in this angular pt ", numb_rad_pts[i_ang])
+ # print("IAS With Multiple Intersections")
+ # print(group_by)
+ # print(unique_basins)
+
+ # Figure out if the number intersections is two or three. Code only checks up to three.
+ index_ray = 0 # Keeps track of how many points to go further
+ more_than_three = False
+ for i, (basin, assigned_basin_vals) in enumerate(group_by):
+ if i != 0:
+ # Multiple intersections found, find correct intervals to search for intersections
+ if basin == i_maxima:
+ if more_than_three:
+ print("Angular pt j", i_ang)
+ print("Number of radial points in this angular pt ", numb_rad_pts[i_ang])
+ print(f"Unique basins {unique_basins}")
+ print(f"Group by {group_by}")
+ # raise RuntimeError(f"More than three intersections was found."
+ # f" Code doesn't check.")
+ warnings.warn(f"More than three intersections was found."
+ f" Code doesn't check.")
+
+ # Add the second intersection
+ ias_2[i_maxima].append(i_ang)
+ ias_basins_2[i_maxima][i_ang] = group_by[i - 1][0]
+ l_bnd = points_a[i_ray + index_ray - 1]
+ u_bnd = points_a[i_ray + index_ray]
+ r_ubnd = np.linalg.norm(u_bnd - maximas[i_maxima])
+ r_lbnd = np.linalg.norm(l_bnd - maximas[i_maxima])
+ ias_bnds_2[i_maxima][i_ang] = [r_lbnd, r_ubnd]
+
+ # Check for the third intersection that occurs afterwards
+ if i + 1 < len(group_by):
+ ias_3[i_maxima].append(i_ang)
+ ias_basins_3[i_maxima][i_ang] = group_by[i + 1][0]
+ i_lbnd = i_ray + index_ray + len(assigned_basin_vals) - 1
+ i_ubnd = i_ray + index_ray + len(assigned_basin_vals)
+ l_bnd = points_a[i_lbnd]
+ u_bnd = points_a[i_ubnd]
+ r_ubnd = np.linalg.norm(u_bnd - maximas[i_maxima])
+ r_lbnd = np.linalg.norm(l_bnd - maximas[i_maxima])
+ ias_bnds_3[i_maxima][i_ang] = [r_lbnd, r_ubnd]
+ more_than_three = True
+ index_ray += len(assigned_basin_vals)
+
+ # print(ias_2[i_maxima])
+ # print(ias_basins_2[i_maxima])
+ # print(ias_bnds_2[i_maxima])
+ # print(ias_3[i_maxima])
+ # print(ias_basins_3[i_maxima])
+ # print(ias_bnds_3[i_maxima])
+ # import matplotlib
+ # import matplotlib.pyplot as plt
+ # from mpl_toolkits import mplot3d
+ # matplotlib.use("Qt5Agg")
+ # fig = plt.figure()
+ # ax = plt.axes(projection='3d')
+ # p = points_a[i_ray: i_ray + numb_rad_pts[i_ang]]
+ # ax.scatter(p[:, 0], p[:, 1], p[:, 2], color="g", s=60)
+ # ax.scatter(maximas[:, 0], maximas[:, 1], maximas[:, 2], color="r", s=60)
+ # plt.show()
+ else:
+ # Store [l_bnd, u_bnd] inside ias_bnds_2[i_maxima] for searching to the IAS in case
+ # the user wants to search for multiple intersections.
+ ias_bnds_2[i_maxima][i_ang] = [[r_ubnd, r_ubnd]]
+
+ i_ray += numb_rad_pts[i_ang]
+ return ias, oas, ias_bnds, ias_basins, ias_2, ias_bnds_2, ias_basins_2, ias_3, ias_bnds_3, ias_basins_3
+
+
+def construct_all_points_of_rays_of_atoms(
+ maximas, angular_pts, radial_grid, maximas_to_do, dens_func, iso_val
+):
+ r"""
+ Construct all points of all rays across all molecules.
+ """
+ # Need a way to track which points correspond to which maxima,
+ # Need a way to track which sets of points correspond to a ray
+ # index_to_atom = [0, i_1, i_1 + i_2, ..., \sum_j^M i_j] first index always zero and last
+ # always the number of points.
+ index_to_atom = [0] * (len(maximas_to_do) + 1) # First index is always zero
+ NUMB_RAYS_TO_ATOM = [len(angular_pts[i]) for i in maximas_to_do]
+ numb_rad_to_radial_shell = [] # List of List: Number of radius points per ray
+ points = []
+ print(NUMB_RAYS_TO_ATOM)
+ for i_do, i in enumerate(maximas_to_do): #range(0, numb_maximas):
+ # Construct all points on the atomic grid around atom i
+ radial_shells = np.einsum("i,jk->jik", radial_grid[i], angular_pts[i])
+ print("Number of radial points", len(radial_grid[i]))
+ rs = maximas[i, None, None, :] + radial_shells # has shape (K, N, 3)
+ rs = np.reshape(rs, (rs.shape[0] * rs.shape[1], 3)) # has shape (KN, 3)
+ print("Total number of points ", rs.shape)
+
+ # Record information what indices it corresponds to
+ numb_rad_to_radial_shell.append([len(radial_grid[i])] * NUMB_RAYS_TO_ATOM[i_do])
+
+ # First remove the density values that are less than isosurface values.
+ density_vals = dens_func(rs)
+ indices = np.where(density_vals < iso_val)[0]
+ if len(indices) != 0:
+ rs = np.delete(rs, indices, axis=0)
+ # Convert from index I to (i) where i is the angular index and j is the radial.
+ for k in indices:
+ numb_rad_to_radial_shell[i_do][k // len(radial_grid[i])] -= 1
+
+ index_to_atom[i_do + 1] = index_to_atom[i_do] + rs.shape[0] # Add what index it is
+ points.append(rs)
+ points = np.vstack(points) # has shape (Product_{i=1}^M K_i N_i, 3)
+ print("Total number of points Over All Molecules ", points.shape)
+ return points, index_to_atom, NUMB_RAYS_TO_ATOM, numb_rad_to_radial_shell
+
+
+def _solve_intersection_of_ias_interval(
+ maximas, ias_indices, angular_pts, dens_func, grad_func, beta_spheres, bnd_err, ss_0, max_ss, tol, ias_lengths
+):
+ r"""
+ Solves the intersection of the ray to the inner-atomic surface.
+
+ A radial grid is constructed over each ray based on `ias_indices`. The basin value
+    is assigned to each point, and the point where it switches basins is recorded.
+ The process is further repeated with a smaller step-size until the distance between two
+ points on the ray is less than `bnd_err`.
+
+ Parameters
+ ----------
+ maximas: ndarray(M, 3)
+ Optimized centers of the electron density.
+    ias_indices: ndarray(N, 7)
+        Rows correspond to each ray that intersects the IAS.
+        First index is which index of maxima it originates from, the second
+        index is the index of the angular point/ray, the third index is the lower bound radius,
+        the fourth index is the upper-bound radius, and the fifth index is the step-size.
+        The sixth index is the basin it switches to, and the seventh index is the index into the `IAS` list.
+ angular_pts: list[ndarray]
+ List of size `M` of the angular points over each maxima.
+ dens_func:
+        The electron density function.
+ grad_func:
+ The gradient of the electron density.
+ beta_spheres: ndarray(M,)
+ Beta-spheres radius of each atom.
+ bnd_err: float
+        The error of the intersection of the IAS. When the distance between two consecutive points
+        on the ray that cross different basins is less than this error, the midpoint is accepted
+        as the final radius value.
+ ss_0: float, optional
+ The initial step-size of the ODE (RK45) solver.
+ max_ss: float, optional
+ Maximum step-size of the ODE (RK45) solver.
+ tol: float, optional
+ Tolerance for the adaptive step-size.
+
+ Return
+ -------
+ List[ndarray()], list[list[int]]:
+        The first list holds arrays that give the radius value that intersects the IAS or OAS.
+ The second list of size `M`, holds which ias crosses which other basin.
+
+ """
+ r_func = [np.zeros((len(angular_pts[i]),), dtype=np.float64) for i in range(len(maximas))]
+ basin_ias = [[-1] * ias_lengths[i] for i in range(len(maximas))]
+ while len(ias_indices) != 0:
+ # Construct New Points
+ points = []
+ numb_pts_per_ray = []
+ for (i_maxima, i_ang, l_bnd, u_bnd, ss, _, _) in ias_indices:
+ # print((i_maxima, i_ang, l_bnd, u_bnd, ss))
+ ray = (
+ maximas[int(i_maxima)] +
+ np.arange(l_bnd, u_bnd + ss, ss)[:, None] * angular_pts[int(i_maxima)][int(i_ang), :]
+ )
+ points.append(ray)
+ numb_pts_per_ray.append(len(ray))
+ points = np.vstack(points)
+
+ # Solve for basins
+ basins, _ = find_basins_steepest_ascent_rk45(
+ points, dens_func, grad_func, beta_spheres, maximas, tol=tol, max_ss=max_ss, ss_0=ss_0
+ )
+ # print("Basins", basins)
+
+ # Refine the rays further
+ index_basins = 0 # Index to iterate through basins
+ converge_indices = []
+ # print("Average step-size", np.mean(ias_indices[:, -1]))
+ make_ode_solver_more_accurate = False
+ for i, (i_maxima, i_ang, l_bnd, u_bnd, ss, basin_switch, i_ias) in enumerate(ias_indices):
+ basins_ray = basins[index_basins:index_basins + numb_pts_per_ray[i]]
+ # print((i_maxima, i_ang, l_bnd, u_bnd, ss))
+ # print("Basins ", i, basins_ray)
+
+ # Basins that switch index
+ i_switch = np.argmax(basins_ray != i_maxima)
+ # print("Index of switch ", i_switch)
+ if i_switch == 0:
+ print("Basins with bad ray: ", basins_ray, (i_maxima, i_ang, l_bnd, u_bnd, ss))
+ # raise ValueError(f"This ray lost it's ability to be an IAS point, as all points converged to the same maxima. "
+ # f"Fix this.")
+
+ # Update ias_indices for the next iteration for example l_bnd, u_bnd, step-size
+ # This lower and upper bound is chosen to guarantee that the IAS point will be found.
+ new_l_bnd = l_bnd - 20.0 * ss
+ new_u_bnd = u_bnd + 20.0 * ss
+ new_ss = max(ss / 10.0, bnd_err)
+ # Make ODE solver more accurate
+ make_ode_solver_more_accurate = True
+ # print("New step-size ", new_ss)
+ ias_indices[i] = [i_maxima, i_ang, new_l_bnd, new_u_bnd, new_ss, basin_switch, i_ias]
+ else:
+ # If ss was less than bnd_err, then we converge and should stop.
+ if ss <= bnd_err:
+ # Take midpoint to be the radius of intersection
+ radius_mid_pt = (2.0 * l_bnd + ss * i_switch) / 2.0
+ r_func[int(i_maxima)][int(i_ang)] = radius_mid_pt
+ basin_ias[int(i_maxima)][int(i_ias)] = basins_ray[i_switch]
+ converge_indices.append(i) # Put in list to remove indices.
+ else:
+ # Update ias_indices for the next iteration for example l_bnd, u_bnd, step-size
+ new_l_bnd = l_bnd + ss * (i_switch - 1)
+ new_u_bnd = l_bnd + ss * (i_switch)
+ new_ss = max(ss / 10.0, bnd_err)
+ # print("New step-size ", new_ss)
+ ias_indices[i] = [i_maxima, i_ang, new_l_bnd, new_u_bnd, new_ss, basin_switch, i_ias]
+
+ # Update index for the next ray
+ index_basins += numb_pts_per_ray[i]
+ # print("COnvergence indices", converge_indices)
+ if make_ode_solver_more_accurate:
+ tol /= 2.0
+ max_ss = min(0.1, max_ss)
+ ss_0 /= 2.0
+
+ # Remove converged indices
+ ias_indices = np.delete(ias_indices, converge_indices, axis=0)
+
+ # Solve for multiple intersections
+ return r_func, basin_ias
+
+
+def _solve_intersection_of_ias_point(
+ maximas, ias_indices, angular_pts,
+ dens_func, grad_func, beta_spheres, bnd_err, ias_lengths, ss_0, max_ss, tol, hess_func=None
+):
+ r"""
+ Solves the intersection of the ray to the inner-atomic surface.
+
+ A point is associated to each ray based on `ias_indices`. The basin value
+    is assigned to each point, and the interval along the ray is bisected until the basin switch is bracketed.
+ The process is further repeated with a smaller step-size until the distance between two
+ points on the ray is less than `bnd_err`.
+
+ Parameters
+ ----------
+ maximas: ndarray(M, 3)
+ Optimized centers of the electron density.
+    ias_indices: ndarray(N, 7)
+        Rows correspond to each ray that intersects the IAS.
+        First index is which index of maxima it originates from, the second
+        index is the index of the angular point/ray, the third index is the lower bound radius,
+        the fourth index is the upper-bound radius, and the fifth index is the step-size.
+        The sixth index holds which basin the IAS point switches to. Note it may
+        not be the true basin value that it switches to.
+        The seventh index holds which index of the `IAS` list it points to.
+ angular_pts: list[ndarray]
+ List of size `M` of the angular points over each maxima.
+ dens_func:
+        The electron density function.
+ grad_func:
+ The gradient of the electron density.
+ beta_spheres: ndarray(M,)
+ Beta-spheres radius of each atom.
+ bnd_err: float
+        The error of the intersection of the IAS. When the distance between two consecutive points
+        on the ray that cross different basins is less than this error, the midpoint is accepted
+        as the final radius value.
+ ias_lengths: list[int]
+ List of length `M` atoms that contains the number of IAS points
+ ss_0: float, optional
+ The initial step-size of the ODE (RK45) solver.
+ max_ss: float, optional
+ Maximum step-size of the ODE (RK45) solver.
+ tol: float, optional
+ Tolerance for the adaptive step-size.
+
+ Return
+ -------
+ List[ndarray()], list[list[int]]:
+        The first list holds arrays that give the radius value that intersects the IAS or OAS.
+ The second list of size `M`, holds which ias crosses which other basin.
+
+ """
+ if not isinstance(ias_indices, np.ndarray):
+ raise TypeError(f"The parameters to solve indices should be numpy array instead of {type(ias_indices)}.")
+ r_func = [np.zeros((len(angular_pts[i]),), dtype=np.float64) for i in range(len(maximas))]
+ basin_ias = [[-1] * ias_lengths[i] for i in range(len(maximas))]
+
+ while len(ias_indices) != 0:
+ # Construct New Points
+ points = []
+ for (i_maxima, i_ang, l_bnd, u_bnd, _, _, _) in ias_indices:
+ # Take the midpoint of interval [l_bnd, u_bnd]
+ ray = maximas[int(i_maxima)] + (l_bnd + u_bnd) / 2.0 * angular_pts[int(i_maxima)][int(i_ang), :]
+ points.append(ray)
+ points = np.vstack(points)
+
+ # Solve for basins
+ # Take the initial step-size of ODE solver based on step-size of ss_0 and the interval of [l_bnd, u_bnd]
+ step_size = np.minimum(ias_indices[:, 4][:, None], ss_0)
+ basins, _ = find_basins_steepest_ascent_rk45(
+ points, dens_func, grad_func, beta_spheres, maximas, tol=tol, max_ss=max_ss, ss_0=step_size, hess_func=hess_func
+ )
+ # print("Basins", basins)
+
+ # Refine the rays further
+ # print("Average step-size", np.mean(ias_indices[:, 4]))
+ converge_indices = []
+ # Note it always assumes that `l_bnd` goes to `i_maxima` and `u_bnd` goes to `basin_switch`.
+ for i, (i_maxima, i_ang, l_bnd, u_bnd, ss, basin_switch, i_ias) in enumerate(ias_indices):
+ #print((i_maxima, i_ang, l_bnd, u_bnd, ss, basin_switch))
+ if basins[i] == i_maxima:
+ # Calculate new step-size between [(l_bnd + u_bnd) / 2, u_bnd]
+ new_l_bnd = (l_bnd + u_bnd) / 2
+ new_u_bnd = u_bnd
+ else:
+ # Calculate new stepsize between [l_bnd, (l_bnd + u_bnd) / 2]
+ new_l_bnd = l_bnd
+ new_u_bnd = (l_bnd + u_bnd) / 2
+ # Update the basin that it switches to.
+ if basin_switch != basins[i]:
+ basin_switch = basins[i]
+ new_ss = (new_u_bnd - new_l_bnd)
+ # If new stepsize was less than bnd_err, then we converge and should stop.
+ if new_ss <= bnd_err:
+ r_func[int(i_maxima)][int(i_ang)] = (new_l_bnd + new_u_bnd) / 2
+ basin_ias[int(i_maxima)][int(i_ias)] = int(basin_switch)
+ converge_indices.append(i) # Put in list to remove indices.
+ else:
+ # Update ias_indices for the next iteration for example l_bnd, u_bnd, step-size
+ # decrease step-size by two to make it smaller than the interval bound
+ new_step_size = np.minimum(new_ss / 2.0, ss_0)
+ ias_indices[i] = [i_maxima, i_ang, new_l_bnd, new_u_bnd, new_step_size, basin_switch, i_ias]
+
+ # Remove converged indices
+ # print("Convergence indices", converge_indices)
+ ias_indices = np.delete(ias_indices, converge_indices, axis=0)
+
+ # Solve for multiple intersections
+ return r_func, basin_ias
+
+
+def qtaim_surface_vectorize(
+ angular,
+ centers,
+ dens_func,
+ grad_func,
+ iso_val=0.001,
+ bnd_err=1e-4,
+ iso_err=1e-5,
+ beta_spheres=None,
+ beta_sphere_deg=27,
+ ss_0=0.1,
+ max_ss=0.25,
+ tol=1e-7,
+ optimize_centers=True,
+ hess_func=None,
+ find_multiple_intersections=False,
+ maximas_to_do=None,
+ min_pt_radial=0.1,
+ padding_radial=3.0,
+ ss_radial=0.24,
+):
+ r"""
+ Parameters
+ ----------
+ angular: List[AngularGrid] or List[int]
+ List of angular grids over each atom, or a list of their degrees.
+ centers: ndarray(M, 3)
+ Atomic coordinates.
+ dens_func: callable(ndarray(N, 3)->ndarray(N,))
+ The electron density function.
+ grad_func: callable(ndarray(N, 3)->ndarray(N,3))
+ The gradient of the electron density.
+ iso_val: float
+ Isosurface value of the outer-atomic surface.
+ bnd_err: float
+ The error of the points on the inner-atomic surface.
+ iso_err: float
+ The error in solving for the isosurface points on the outer-atomic surface.
+ beta_spheres: (List[float] or None)
+ The radius of confidence that points are assigned to the atom. Should have length `M`.
+ beta_sphere_deg: int
+ Integer specifying angular grid of degree `beta_sphere_deg` that is used to find the beta-sphere
+        automatically, if `beta_spheres` isn't provided. Default value is 27.
+ ss_0: float, optional
+ The initial step-size of the ODE (RK45) solver.
+ max_ss: float, optional
+ Maximum step-size of the ODE (RK45) solver.
+ tol: float, optional
+ Tolerance for the adaptive step-size of the ODE solver.
+ optimize_centers: bool
+ If true, then the steepest-ascent is performed on the centers to find the local maximas.
+ hess_func: callable(ndarray(N, 3)->ndarray(N, 3, 3))
+ The Hessian of the electron density. If this is provided, then non-nuclear attractors will be found.
+ Adds a default Lebedev/angular grid of degree fifty and 0.1 a.u. to the `beta-spheres` if it is
+ provided.
+ find_multiple_intersections: bool
+ If true, then it searches for up to three intersections of the inter-atomic surface. This is a
+ time-consuming process but produces more accurate surfaces.
+ maximas_to_do: (None, list[int])
+ List of indices of the `centers`/`maximas` to solve for the QTAIM basin surface. If this is provided,
+ then `angular` should also be of this length.
+ min_pt_radial: float
+ The minimum point to each radial grids that are constructed over each atom. Sometimes, two
+ maximas are very close to one another and so it controls how the beta-spheres are determined.
+ padding_radial: float
+ Adds a padding to the maximum of each radial grids that are constructed over each atom.
+ The default maximum is taken based on the maximum distance between the closest five atoms.
+ It affects the decision that the ray crosses the IAS or OAS.
+ ss_radial: float, list[float]
+ The step-size of the radial grids over each atom. It affects the decision that the ray crosses the IAS or OAS.
+ Smaller step-size is able to capture more points that should be on the IAS.
+
+ Returns
+ -------
+ SurfaceQTAIM:
+ Object that holds all information regarding the surface of each atom.
+
+ Notes
+ -----
+ The algorithm is as follows:
+ 1. Optimize the centers provided to obtain the local maximas.
+ 2. Determine the beta-spheres over all atoms.
+ 3. Using an angular grid and radial grid, construct all rays propagating across all atoms.
+ 4. Solve for each basin value for each point.
+ 5. Analyze the basin values and classify each ray as either an outer-atomic or inner-atomic
+ surface point.
+ 6. For the inner-atomic rays, find the point of intersection to the surface boundary based on moving
+ along the ray.
+ 7. For the points on the outer-atomic surface, solve for when the point has density equal to the isosurface value.
+ 8. Analyze points on the IAS, and if their density values are less than the outer-atomic surface, then
+ re-classify them as a OAS point.
+
+ """
+ if len(angular) != len(centers):
+ raise ValueError(f"Length of angular {len(angular)} should be the same as the"
+ f"number of centers {len(centers)}.")
+ if beta_spheres is not None and len(centers) != len(beta_spheres):
+ raise ValueError(
+ f"Beta sphere length {len(beta_spheres)} should match the"
+ f" number of centers {len(centers)}"
+ )
+ if not isinstance(beta_spheres, (type(None), np.ndarray)):
+ raise TypeError(f"Beta_sphers {type(beta_spheres)} should be of numpy type.")
+ if maximas_to_do is not None and not isinstance(maximas_to_do, list):
+ raise TypeError(f"Maximas to do {type(maximas_to_do)} should be either None or a list of integers.")
+ if maximas_to_do is not None and max(maximas_to_do) >= len(centers):
+ raise ValueError(f"Length of maximas_to_do {len(maximas_to_do)} should be less then"
+ f" length of centers {len(centers)}.")
+ if isinstance(ss_radial, float):
+ ss_radial = [ss_radial] * len(centers)
+ if len(ss_radial) != len(centers):
+ raise ValueError(f"The step-size for each radial grid {len(ss_radial)} should be"
+ f" equal to the number of molecules {len(centers)}.")
+ if maximas_to_do is None:
+ maximas_to_do = np.arange(len(centers))
+
+ # Using centers, update to the maximas
+ maximas = centers
+ if optimize_centers:
+ # Using ODE solver to refine the maximas further.
+ maximas = find_optimize_centers(centers, grad_func)
+
+ # Construct a dense radial grid for each atom by taking distance to the closest five atoms.
+ ss0 = 0.1
+ # radial_grids = construct_radial_grids(maximas[maximas_to_do], maximas, min_pts=0.1, pad=padding_radial, ss0=ss0)
+ radial_grids = construct_radial_grids(maximas, maximas, min_pts=0.1, pad=padding_radial, ss0=ss0)
+
+ # Determine beta-spheres and non-nuclear attractors from a smaller angular grid
+ # Degree can't be too small or else the beta-radius is too large and IAS point got classified
+ # as OAS point.
+ ang_grid = AngularGrid(degree=beta_sphere_deg, method="spherical")
+ if beta_spheres is None:
+ beta_spheres, maximas, radial_grids = determine_beta_spheres_and_nna(
+ beta_spheres, maximas, radial_grids, ang_grid.points, dens_func, grad_func, hess_func
+ )
+ beta_spheres = np.array(beta_spheres)
+ print(f"Final Beta-spheres {beta_spheres}")
+ # Check beta-spheres are not intersecting
+ dist_maxs = cdist(maximas, maximas)
+ condition = dist_maxs <= beta_spheres[:, None] + beta_spheres
+ condition[range(len(maximas)), range(len(maximas))] = False # Diagonal always true
+ if np.any(condition):
+ raise ValueError(f"Beta-spheres {beta_spheres} overlap with one another.")
+ # Rotate the beta-sphere and double check the radius is correct
+ rot_mat = r.from_euler("z", 10, degrees=True).as_matrix() # Generate rotation by 10 degrees in z-axis
+ ang_pts = ang_grid.points.dot(rot_mat.T) # Rotate the angular points
+ assert np.all(np.abs(np.linalg.norm(ang_pts, axis=1) - 1.0) < 1e-8)
+ for i_maxima, radius in enumerate(beta_spheres):
+ pts_at_rad = maximas[i_maxima] + radius * ang_pts
+ basins, _ = find_basins_steepest_ascent_rk45(
+ pts_at_rad, dens_func, grad_func, beta_spheres, maximas, tol=tol, max_ss=max_ss, ss_0=ss_0
+ )
+ #assert np.all(basins == i_maxima)
+ if not np.all(basins == i_maxima):
+ # Decrease the beta-sphere by the step-size if all of the beta-sphere didn't converge to the
+ # correct atom.
+ beta_spheres[i_maxima] -= ss0
+
+ # Construct a coarse radial grid for each atom starting at the beta-spheres.
+ radial_grids_old = radial_grids
+ radial_grids = []
+ i_do = 0
+ for i_atom in range(len(maximas)):
+ if i_atom in maximas_to_do:
+ radial_grids.append(
+ np.arange(beta_spheres[i_atom], radial_grids_old[i_do][-1], ss_radial[i_atom])
+ )
+ i_do += 1
+ else:
+ radial_grids.append([])
+
+ # Construct Angular Points
+ angular_pts = []
+ for i in range(len(maximas)):
+ # If it is not provided, then use what's specified
+ if i < len(angular):
+ if i in maximas_to_do:
+ ang = angular[i]
+ if isinstance(ang, int):
+ angular_pts.append(AngularGrid(degree=ang, method="spherical").points)
+ else:
+ angular_pts.append(ang.points)
+ else:
+ angular_pts.append([])
+ else:
+ # If it is a Non-nuclear attractor
+ angular.append(99)
+ angular_pts.append(AngularGrid(degree=99, method="spherical").points)
+
+ # First step is to construct a grid that encloses all radial shells across all atoms
+ points, index_to_atom, NUMB_RAYS_TO_ATOM, numb_rad_to_radial_shell = \
+ construct_all_points_of_rays_of_atoms(
+ maximas, angular_pts, radial_grids, maximas_to_do, dens_func, iso_val
+ )
+ # print("Index to atom ", index_to_atom)
+
+ # Then assign basins values for all the points.
+ import time
+ start = time.time()
+ basins, _ = find_basins_steepest_ascent_rk45(
+ points, dens_func, grad_func, beta_spheres, maximas, tol=tol, max_ss=max_ss, ss_0=ss_0,
+ hess_func=hess_func, check_for_nna=True
+ )
+ final = time.time()
+ # print("Basins", basins)
+ # print("Length of basins ", len(basins))
+ print("Difference ", final - start)
+
+ # Using basin values, classify each ray as either IAS or OAS and if IAS, find the interval
+ # along the ray that intersects the IAS.
+ ias, oas, ias_bnds, ias_basins, ias_2, ias_bnds_2, ias_basins_2, ias_3, ias_bnds_3, ias_basins_3 = \
+ _classify_rays_as_ias_or_oas(
+ maximas, maximas_to_do, points, basins, index_to_atom, NUMB_RAYS_TO_ATOM, numb_rad_to_radial_shell
+ )
+
+ print("Total number of two intersections found ", [len(x) for x in ias_2])
+ print("Total number of three intersections found ", [len(x) for x in ias_3])
+
+ # The IAS is just refining the ray, till you find the exact intersection with the surface.
+ # Checks docs of `_solve_intersection_of_ias` for what ias_indices is.
+ # i_maxima, i_ang, l_bnd, u_bnd, ss, basin_switch, i_ias
+ ias_indices = np.array(list(
+ itertools.chain.from_iterable(
+ [[(i, i_ang, ias_bnds[i][i_ang][0], ias_bnds[i][i_ang][1], max(ss_radial[i] / 10.0, bnd_err), ias_basins[i][i_ang], i_ias)
+ for i_ias, i_ang in enumerate(ias[i])] for i in maximas_to_do]
+ )
+ ))
+ start = time.time()
+ r_func, basin_ias = _solve_intersection_of_ias_point(
+ maximas, ias_indices, angular_pts, dens_func, grad_func, beta_spheres, bnd_err,
+ tol=tol, max_ss=max_ss, ss_0=ss_0, ias_lengths=[len(x) for x in ias], hess_func=hess_func
+ )
+ final = time.time()
+ print("Time Difference for Solving IAS ", final - start)
+
+ # Pts on IAS that should be OAS are solved here. TODO:, the _solve_intersection should handle it
+ # so it doesn't solve it again.
+ for i_atom in maximas_to_do:
+ pts = maximas[i_atom] + r_func[i_atom][ias[i_atom], None] * angular_pts[i_atom][ias[i_atom], :]
+ dens_vals = dens_func(pts)
+ # Decrease by the OAS surface error "iso_err"
+ ias_indices = np.where(dens_vals - iso_err < iso_val)[0]
+ if len(ias_indices) != 0:
+ print(f"Atom {i_atom} points that are IAS should be OAS")
+ oas[i_atom] += list(np.array(ias[i_atom], dtype=int)[ias_indices])
+ oas[i_atom] = sorted(oas[i_atom])
+ ias[i_atom] = [k for j, k in enumerate(ias[i_atom]) if j not in ias_indices]
+ basin_ias[i_atom] = [k for j, k in enumerate(basin_ias[i_atom]) if j not in ias_indices]
+
+ # Solve OAS Points and updates r_func
+ # Update the radial grid step-size so that it samples more points, this shouldn't decrease computational complexity
+ # since the electron density is cheaper to compute with.
+ start = time.time()
+ solve_for_oas_points(maximas, maximas_to_do, oas, angular_pts, dens_func, iso_val, iso_err, r_func)
+ final = time.time()
+ print("Time Difference for Solving OAS", final - start)
+
+ if find_multiple_intersections:
+ raise NotImplementedError(f"Multiple intersections was not implemented yet.")
+
+ return SurfaceQTAIM(r_func, angular, maximas, maximas_to_do, oas, ias, basin_ias, iso_val, beta_spheres)
+
+
+
+
+def _solve_intersection_of_ias_point_hirshfeld(
+ maximas, ias_indices, angular_pts, basin_assignment_callable, bnd_err, ias_lengths,
+):
+ r"""
+ Solves the intersection of the ray to the inner-atomic surface.
+
+ A point is associated to each ray based on `ias_indices`. The basin value
+    is assigned to each point, and the interval along the ray is bisected until the basin switch is bracketed.
+ The process is further repeated with a smaller step-size until the distance between two
+ points on the ray is less than `bnd_err`.
+
+ Parameters
+ ----------
+ maximas: ndarray(M, 3)
+ Optimized centers of the electron density.
+    ias_indices: ndarray(N, 7)
+        Rows correspond to each ray that intersects the IAS.
+        First index is which index of maxima it originates from, the second
+        index is the index of the angular point/ray, the third index is the lower bound radius,
+        the fourth index is the upper-bound radius, and the fifth index is the step-size.
+        The sixth index holds which basin the IAS point switches to. Note it may
+        not be the true basin value that it switches to.
+        The seventh index holds which index of the `IAS` list it points to.
+ angular_pts: list[ndarray]
+ List of size `M` of the angular points over each maxima.
+    basin_assignment_callable: callable(ndarray(N, 3) -> ndarray(N,))
+        Callable that assigns a basin index (row index of `maximas`) to each
+        of the `N` Cartesian points passed to it.  This replaces the
+        steepest-ascent (density/gradient based) basin assignment that the
+        analogous QTAIM routine `_solve_intersection_of_ias_point` uses, so
+        no density, gradient, beta-sphere or ODE step-size parameters are
+        needed here.
+    bnd_err: float
+        The error of the intersection of the IAS. When the distance between
+        two consecutive points on the ray that cross different basins is
+        less than this error, the midpoint is accepted as the final radius
+        value.  The bisection of the interval [l_bnd, u_bnd] stops once the
+        interval width drops below this threshold.
+    ias_lengths: list[int]
+        List of length `M` atoms that contains the number of IAS points of
+        each atom; it fixes the length of each inner list of the returned
+        `basin_ias`.  Entries that remain `-1` were never assigned a
+        neighbouring basin.
+
+ Return
+ -------
+ List[ndarray()], list[list[int]]:
+        The first list holds arrays that give the radius value that intersects the IAS or OAS.
+ The second list of size `M`, holds which ias crosses which other basin.
+
+ """
+ if not isinstance(ias_indices, np.ndarray):
+ raise TypeError(f"The parameters to solve indices should be numpy array instead of {type(ias_indices)}.")
+ r_func = [np.zeros((len(angular_pts[i]),), dtype=np.float64) for i in range(len(maximas))]
+ basin_ias = [[-1] * ias_lengths[i] for i in range(len(maximas))]
+
+ while len(ias_indices) != 0:
+ # Construct New Points
+ points = []
+ for (i_maxima, i_ang, l_bnd, u_bnd, _, _, _) in ias_indices:
+ # Take the midpoint of interval [l_bnd, u_bnd]
+ ray = maximas[int(i_maxima)] + (l_bnd + u_bnd) / 2.0 * angular_pts[int(i_maxima)][int(i_ang), :]
+ points.append(ray)
+ points = np.vstack(points)
+
+ # Solve for basins
+ # Take the initial step-size of ODE solver based on step-size of ss_0 and the interval of [l_bnd, u_bnd]
+ basins = basin_assignment_callable(points)
+ # print("Basins", basins)
+
+ # Refine the rays further
+ # print("Average step-size", np.mean(ias_indices[:, 4]))
+ converge_indices = []
+ # Note it always assumes that `l_bnd` goes to `i_maxima` and `u_bnd` goes to `basin_switch`.
+ for i, (i_maxima, i_ang, l_bnd, u_bnd, ss, basin_switch, i_ias) in enumerate(ias_indices):
+ #print((i_maxima, i_ang, l_bnd, u_bnd, ss, basin_switch))
+ if basins[i] == i_maxima:
+ # Calculate new step-size between [(l_bnd + u_bnd) / 2, u_bnd]
+ new_l_bnd = (l_bnd + u_bnd) / 2
+ new_u_bnd = u_bnd
+ else:
+ # Calculate new stepsize between [l_bnd, (l_bnd + u_bnd) / 2]
+ new_l_bnd = l_bnd
+ new_u_bnd = (l_bnd + u_bnd) / 2
+ # Update the basin that it switches to.
+ if basin_switch != basins[i]:
+ basin_switch = basins[i]
+ new_ss = (new_u_bnd - new_l_bnd)
+ # If new stepsize was less than bnd_err, then we converge and should stop.
+ if new_ss <= bnd_err:
+ r_func[int(i_maxima)][int(i_ang)] = (new_l_bnd + new_u_bnd) / 2
+ basin_ias[int(i_maxima)][int(i_ias)] = int(basin_switch)
+ converge_indices.append(i) # Put in list to remove indices.
+ else:
+ # Update ias_indices for the next iteration for example l_bnd, u_bnd, step-size
+ # decrease step-size by two to make it smaller than the interval bound
+ ias_indices[i] = [i_maxima, i_ang, new_l_bnd, new_u_bnd, None, basin_switch, i_ias]
+
+ # Remove converged indices
+ # print("Convergence indices", converge_indices)
+ ias_indices = np.delete(ias_indices, converge_indices, axis=0)
+
+ # Solve for multiple intersections
+ return r_func, basin_ias
+
+
+def hirshfeld_surface_vectorize(
+ angular,
+ centers,
+ dens_func,
+ basin_assignment_callable,
+ iso_val=0.001,
+ bnd_err=1e-4,
+ iso_err=1e-5,
+ find_multiple_intersections=False,
+ maximas_to_do=None,
+ min_pt_radial=0.1,
+ padding_radial=3.0,
+ ss_radial=0.24,
+):
+ if len(angular) != len(centers):
+ raise ValueError(f"Length of angular {len(angular)} should be the same as the"
+ f"number of centers {len(centers)}.")
+ if maximas_to_do is not None and not isinstance(maximas_to_do, list):
+ raise TypeError(f"Maximas to do {type(maximas_to_do)} should be either None or a list of integers.")
+ if maximas_to_do is not None and max(maximas_to_do) >= len(centers):
+ raise ValueError(f"Length of maximas_to_do {len(maximas_to_do)} should be less then"
+ f" length of centers {len(centers)}.")
+ if isinstance(ss_radial, float):
+ ss_radial = [ss_radial] * len(centers)
+ if len(ss_radial) != len(centers):
+ raise ValueError(f"The step-size for each radial grid {len(ss_radial)} should be"
+ f" equal to the number of molecules {len(centers)}.")
+ if maximas_to_do is None:
+ maximas_to_do = np.arange(len(centers))
+
+ # Using centers, update to the maximas
+ maximas = centers
+
+ # Construct a dense radial grid for each atom by taking distance to the closest five atoms.
+ ss0 = 0.1
+ radial_grids = construct_radial_grids(maximas, maximas, min_pts=0.1, pad=padding_radial, ss0=ss0)
+
+ # Construct a coarse radial grid for each atom starting at the beta-spheres.
+ radial_grids_old = radial_grids
+ radial_grids = []
+ i_do = 0
+ for i_atom in range(len(maximas)):
+ if i_atom in maximas_to_do:
+ radial_grids.append(
+ np.arange(min_pt_radial, radial_grids_old[i_do][-1], ss_radial[i_atom])
+ )
+ i_do += 1
+ else:
+ radial_grids.append([])
+
+ # Construct Angular Points
+ angular_pts = []
+ for i in range(len(maximas)):
+ if i in maximas_to_do:
+ ang = angular[i]
+ if isinstance(ang, int):
+ angular_pts.append(AngularGrid(degree=ang, method="spherical").points)
+ else:
+ angular_pts.append(ang.points)
+ else:
+ angular_pts.append([])
+
+ # First step is to construct a grid that encloses all radial shells across all atoms
+ points, index_to_atom, NUMB_RAYS_TO_ATOM, numb_rad_to_radial_shell = \
+ construct_all_points_of_rays_of_atoms(
+ maximas, angular_pts, radial_grids, maximas_to_do, dens_func, iso_val
+ )
+
+ # Then assign basins values for all the points.
+ import time
+ start = time.time()
+ basins = basin_assignment_callable(points)
+ final = time.time()
+ print("Difference ", final - start)
+
+ # Using basin values, classify each ray as either IAS or OAS and if IAS, find the interval
+ # along the ray that intersects the IAS.
+ ias, oas, ias_bnds, ias_basins, ias_2, ias_bnds_2, ias_basins_2, ias_3, ias_bnds_3, ias_basins_3 = \
+ _classify_rays_as_ias_or_oas(
+ maximas, maximas_to_do, points, basins, index_to_atom, NUMB_RAYS_TO_ATOM, numb_rad_to_radial_shell
+ )
+
+ print("Total number of two intersections found ", [len(x) for x in ias_2])
+ print("Total number of three intersections found ", [len(x) for x in ias_3])
+
+ # The IAS is just refining the ray, till you find the exact intersection with the surface.
+ # Checks docs of `_solve_intersection_of_ias` for what ias_indices is.
+ # i_maxima, i_ang, l_bnd, u_bnd, ss, basin_switch, i_ias
+ ias_indices = np.array(list(
+ itertools.chain.from_iterable(
+ [[(i, i_ang, ias_bnds[i][i_ang][0], ias_bnds[i][i_ang][1], max(ss_radial[i] / 10.0, bnd_err), ias_basins[i][i_ang], i_ias)
+ for i_ias, i_ang in enumerate(ias[i])] for i in maximas_to_do]
+ )
+ ))
+ start = time.time()
+ r_func, basin_ias = _solve_intersection_of_ias_point_hirshfeld(
+ maximas, ias_indices, angular_pts, basin_assignment_callable, bnd_err, ias_lengths=[len(x) for x in ias]
+ )
+ final = time.time()
+ print("Time Difference for Solving IAS ", final - start)
+
+ # Pts on IAS that should be OAS are solved here. TODO:, the _solve_intersection should handle it
+ # so it doesn't solve it again.
+ for i_atom in maximas_to_do:
+ pts = maximas[i_atom] + r_func[i_atom][ias[i_atom], None] * angular_pts[i_atom][ias[i_atom], :]
+ dens_vals = dens_func(pts)
+ # Decrease by the OAS surface error "iso_err"
+ ias_indices = np.where(dens_vals - iso_err < iso_val)[0]
+ if len(ias_indices) != 0:
+ print(f"Atom {i_atom} points that are IAS should be OAS")
+ oas[i_atom] += list(np.array(ias[i_atom], dtype=int)[ias_indices])
+ oas[i_atom] = sorted(oas[i_atom])
+ ias[i_atom] = [k for j, k in enumerate(ias[i_atom]) if j not in ias_indices]
+ basin_ias[i_atom] = [k for j, k in enumerate(basin_ias[i_atom]) if j not in ias_indices]
+
+ # Solve OAS Points and updates r_func
+ # Update the radial grid step-size so that it samples more points, this shouldn't decrease computational complexity
+ # since the electron density is cheaper to compute with.
+ start = time.time()
+ solve_for_oas_points(maximas, maximas_to_do, oas, angular_pts, dens_func, iso_val, iso_err, r_func)
+ final = time.time()
+ print("Time Difference for Solving OAS", final - start)
+
+ if find_multiple_intersections:
+ raise NotImplementedError(f"Multiple intersections was not implemented yet.")
+
+ return SurfaceQTAIM(r_func, angular, maximas, maximas_to_do, oas, ias, basin_ias, iso_val, None)
diff --git a/chemtools/topology/qtaim_depreciated.py b/chemtools/topology/qtaim_depreciated.py
new file mode 100644
index 00000000..47580721
--- /dev/null
+++ b/chemtools/topology/qtaim_depreciated.py
@@ -0,0 +1,863 @@
+# -*- coding: utf-8 -*-
+# ChemTools is a collection of interpretive chemical tools for
+# analyzing outputs of the quantum chemistry calculations.
+#
+# Copyright (C) 2016-2019 The ChemTools Development Team
+#
+# This file is part of ChemTools.
+#
+# ChemTools is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# ChemTools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see
+#
+# --
+import numpy as np
+
+from scipy.integrate import solve_ivp
+from scipy.interpolate import LSQSphereBivariateSpline, SmoothSphereBivariateSpline
+from scipy.optimize import root_scalar
+from scipy.spatial import ConvexHull, Voronoi
+from scipy.spatial.distance import cdist
+
+from scipy.sparse import lil_matrix
+
+from grid.atomgrid import AtomGrid
+from grid.cubic import UniformGrid, _HyperRectangleGrid
+from grid.angular import AngularGrid
+from grid.utils import convert_cart_to_sph
+
+from chemtools.topology.ode import steepest_ascent_rk45
+from chemtools.topology.surface import SurfaceQTAIM
+
+import time
+
+
def construct_points_between_ias_and_oas(
    ias: list, oas: list, angular_pts: np.ndarray, r_func_max: np.ndarray, maxima: np.ndarray
):
    r"""
    Construct points between the inner atomic surface and outer atomic surface.

    This is done by constructing a convex hull over the IAS and the OAS, separately.
    For each point on the IAS hull, the two closest points on the OAS hull are found and
    a triangle is constructed. Seven points are constructed within this triangle and,
    for each, the unit direction from `maxima` (i.e. the Cartesian coordinate on the
    sphere centered at the maxima) is solved for.

    Parameters
    -----------
    ias : List[int]
        List of integers of `angular_pts` that are part of the inner atomic surface (IAS).
    oas : List[int]
        List of integers of `angular_pts` that are part of the outer atomic surface (OAS).
    angular_pts : np.ndarray
        Angular points around the maxima from which rays are propagated.
    r_func_max : np.ndarray
        The radial component for each angular point in `angular_pts` that either gives
        the radial value that intersects the OAS or the IAS.
    maxima : np.ndarray
        Maxima of the basin.

    Returns
    -------
    ndarray(K, 3)
        Unit directions from `maxima` (points on the unit sphere centered at `maxima`)
        corresponding to the points constructed above. Directions that coincide with
        the maxima or duplicate one another are removed, so :math:`K` is at most seven
        times the number of IAS hull vertices.

    """
    # Take a convex hull of both IAS and OAS separately.
    ias_pts = maxima + r_func_max[ias, None] * angular_pts[ias, :]
    oas_pts = maxima + r_func_max[oas, None] * angular_pts[oas, :]
    ias_hull = ConvexHull(ias_pts)
    oas_hull = ConvexHull(oas_pts)
    ias_bnd = ias_hull.points[ias_hull.vertices]
    oas_bnd = oas_hull.points[oas_hull.vertices]

    # Distance between every IAS hull point and every OAS hull point.
    dist_mat = cdist(ias_bnd, oas_bnd)
    # For each point on the IAS hull, take the closest two points on the OAS hull.
    new_ang_pts = np.zeros((0, 3), dtype=np.float64)  # usually 7 points per IAS boundary point.
    for i_ias, pt_ias in enumerate(ias_bnd):
        # Get the two closest points on OAS to this IAS pt.
        two_indices = dist_mat[i_ias].argsort()[:2]
        pt1, pt2 = oas_bnd[two_indices[0]], oas_bnd[two_indices[1]]

        # Take the center and midpoint of each edge of the triangle (pt_ias, pt1, pt2).
        midpoint = (pt1 + pt2 + pt_ias) / 3.0
        line_pt1 = (pt1 + pt_ias) / 2.0
        line_pt2 = (pt2 + pt_ias) / 2.0
        line_pt3 = (pt1 + pt2) / 2.0

        # The triangle with the center can be split into three polygons, take the center of each.
        poly_pt1 = (midpoint + line_pt1 + line_pt2 + pt_ias) / 4.0
        poly_pt2 = (midpoint + line_pt1 + line_pt3 + pt1) / 4.0
        poly_pt3 = (midpoint + line_pt2 + line_pt3 + pt2) / 4.0

        new_pts = np.array([midpoint, line_pt1, line_pt2, line_pt3, poly_pt1, poly_pt2, poly_pt3])
        # Solve for the Cartesian angular coordinates of these 7 points by solving
        # r = m + t * direction, where m is the maxima, direction has norm one and r is
        # each of these points.
        direction = new_pts - maxima
        # Delete points that are on the maxima (zero direction cannot be normalized).
        direction = np.delete(direction, np.all(np.abs(direction) < 1e-10, axis=1), axis=0)
        t = np.linalg.norm(direction, axis=1)
        direction = direction / t[:, None]
        # Delete directions that are the same
        direction = np.unique(direction, axis=0)
        new_ang_pts = np.vstack((new_ang_pts, direction))
    return new_ang_pts
+
+
def gradient_path(pt, grad_func, maximas=None, t_span=(0, 1000), method="LSODA", max_step=100,
                  t_inc=400, max_tries=10, first_step=1e-3, beta_spheres=-np.inf):
    r"""Integrate the gradient ODE starting from `pt` and return the converged endpoint.

    The ODE is solved over `t_span`; whenever the trajectory has not converged yet, the
    integration interval is extended by `t_inc` and integration resumes from the last
    endpoint, up to `max_tries` times. With `maximas` given, convergence means the
    endpoint lies within 0.1 of some maxima or inside a beta-sphere; otherwise it means
    the last two solution points differ by less than 0.1.

    Raises RuntimeError when no convergence is reached after `max_tries` attempts.
    """
    # TODO: If the density value is low, the gradient is low and the ODE barely moved,
    #  then an option is to set max_step to np.inf.
    def rhs(t, x):
        # solve_ivp supplies a flat (3,) state; grad_func expects an (N, 3) batch.
        return grad_func(np.array([x]))[0]

    y_start = pt.copy()
    for _ in range(max_tries):
        sol = solve_ivp(
            rhs,
            y0=y_start,
            t_span=t_span,
            method=method,
            max_step=max_step,
            first_step=first_step,
        )
        assert sol["success"], "ODE was not successful."
        endpoint = sol["y"][:, -1]

        if maximas is not None:
            # Converged once the endpoint is near a maxima or within any beta-sphere.
            dist_maxima = np.linalg.norm(endpoint - maximas, axis=1)
            if np.any(dist_maxima < 0.1) or np.any(dist_maxima <= beta_spheres):
                return endpoint
        else:
            # No maximas given: converged once the final step barely moves.
            if np.linalg.norm(sol["y"][:, -2] - endpoint) < 1e-1:
                return endpoint

        # Not converged: resume integration from the endpoint over the next interval.
        t_span = (t_span[1], t_span[1] + t_inc)
        y_start = endpoint

    raise RuntimeError(f"No convergence in normalized_gradient path pt {pt},"
                       f" solution {sol['y'][:, -1]}, t_span {t_span}")
+
+
def gradient_path_all_pts(
    pts, grad_func, beta_spheres, i_maxima, maximas=None, t_span=(0, 1000), method="LSODA", max_step=100,
    t_inc=500, first_step=1e-3, rtol=1e-4, atol=1e-7, is_watershed_pt=False
):
    r"""
    Assign each point to a basin by tracing its gradient path with an ODE solver.

    For every point, the gradient ODE is integrated until the trajectory ends inside
    the beta-sphere of some maxima; the index of that maxima is recorded. If the
    trajectory has not converged, the integration interval is extended by `t_inc` and
    integration resumes from the last endpoint.

    Parameters
    ----------
    pts : ndarray(N, 3)
        Points to back-trace.
    grad_func : callable(ndarray(M, 3)) -> ndarray(M, 3)
        Gradient of the density.
    beta_spheres : ndarray(K,)
        Beta-sphere radius of each maxima; a trajectory ending within a beta-sphere is
        assigned to that maxima.
    i_maxima : int
        Index of the maxima the points/ray originate from; only used with
        `is_watershed_pt`.
    maximas : ndarray(K, 3)
        All local maximas of the density.
    t_span, method, max_step, t_inc, first_step, rtol, atol
        Options forwarded to `scipy.integrate.solve_ivp`; `t_inc` is the extension of
        the integration interval when a trajectory has not yet converged.
    is_watershed_pt : bool
        If True, return early as soon as one point is assigned to a basin different
        from `i_maxima`.

    Returns
    -------
    ndarray(N,)
        Basin (maxima) index of each point; entries for points after an early
        `is_watershed_pt` return keep their initial value of zero.

    """
    def grad(t, x):
        return grad_func(np.array([x]))[0]

    basins = np.zeros((len(pts),))
    for i_pt, pt in enumerate(pts):
        found_basin = False
        y0 = pt.copy()
        # Each point starts from the caller's interval; previously the extended
        # interval leaked from one point into the next.
        span = t_span

        while not found_basin:
            sol = solve_ivp(
                grad,
                y0=y0,
                t_span=span,
                method=method,
                max_step=max_step,
                first_step=first_step,
                rtol=rtol,
                atol=atol,
            )
            assert sol["success"], "ODE was not successful."
            y_vals = sol["y"][:, -1]

            # Converged once the endpoint lies within some beta-sphere.
            dist_maxima = cdist(np.array([y_vals]), maximas)
            beta_sph = dist_maxima <= beta_spheres
            if np.any(beta_sph):
                which_basin = np.where(beta_sph[0])
                assert len(which_basin[0]) == 1, "More than one basin was found"
                found_basin = True
                basins[i_pt] = which_basin[0][0]

                # If the point is guaranteed to be a watershed point, stop as soon as
                # a basin different from `i_maxima` is found.
                if is_watershed_pt and which_basin[0][0] != i_maxima:
                    return basins

            # Could not find the basin yet: resume from the endpoint on a longer span.
            span = (span[1], span[1] + t_inc)
            y0 = y_vals
    return basins
+
+
def gradient_path_vectorized(pts, grad_func, maximas=None, t_span=(0, 1000), method="LSODA", max_step=100,
                             t_inc=400, max_tries=10, first_step=1e-3, beta_spheres=-np.inf, rtol=1e-4, atol=1e-7):
    r"""
    Back-trace a batch of points along the gradient ODE and assign each to a basin.

    All points are integrated simultaneously by flattening them into a single 3N state
    vector for `solve_ivp`. A point counts as converged when its endpoint lies within
    0.1 of some maxima or within a beta-sphere; converged points are removed and the
    remaining ones are integrated again over an extended interval.

    Returns
    -------
    ndarray(N,) of int
        Index of the maxima each point converged to.

    """
    # Flatten (N, 3) points into one state vector; C-order so each triple stays contiguous.
    y0 = np.ravel(pts, order="C")
    numb_pts = len(pts)
    print("Numb_pts ", numb_pts)
    numb_tries = 0

    def grad(t, pts, numb_rays):
        # Reshape the flat state back to (numb_rays, 3) before evaluating the gradient.
        pts_arr = np.reshape(pts, (numb_rays, 3), order="C")
        pts_arr = pts_arr.copy()
        return np.ravel(grad_func(pts_arr), order="C")

    indices = np.arange(0, numb_pts) # indices not converged
    basins = np.zeros((numb_pts), dtype=int)
    while len(indices) != 0:
        sol = solve_ivp(
            grad,
            y0=y0,
            t_span=t_span,
            method=method,
            max_step=max_step,
            first_step=first_step,
            args=(numb_pts,),
            rtol=rtol,
            atol=atol,
            vectorized=False,
        )
        assert sol["success"]
        # print(sol)
        y_vals = sol["y"][:, -1]
        print("Yvals", np.reshape(y_vals, (numb_pts, 3)))
        y_vals = np.reshape(y_vals, (numb_pts, 3), order="C")

        # See if any of the points converge to maximas or their beta-spheres.
        dist_maxima = cdist(y_vals, maximas)
        print("Dist Maxima", dist_maxima)
        print("Which less than 0.1 ", np.any(dist_maxima < 0.1, axis=1))
        conv_to_maxima = np.any(dist_maxima < 0.1, axis=1)

        which_basins = np.argmin(dist_maxima, axis=1) # which maximas it is closest to
        print("Which basins it converged to ", which_basins)

        beta_sph = dist_maxima <= beta_spheres

        print("Dist maxima <= beta_sphere ", beta_sph)
        which_beta_basins = np.where(dist_maxima <= beta_spheres)
        print("which pts are within basin based on beta-sphere", which_beta_basins)
        conv_to_beta = np.any(beta_sph, axis=1)
        print("Conv to maxima", conv_to_maxima)
        print("Conv to bet asphere", conv_to_beta)
        which_converged = (conv_to_maxima | conv_to_beta)
        print("which converged ", which_converged)
        print(np.argmin(which_converged, axis=0))

        # Update which basins it converged to
        basins[indices[which_converged]] = which_basins[which_converged]
        if len(which_beta_basins[1]) != 0:
            # If the distance to beta-sphere where found, then replace it with those values.
            # NOTE(review): this assumes each row of `beta_sph` has at most one True and that
            #  `which_beta_basins[1]` lines up row-by-row with `conv_to_beta` — confirm.
            print("basins", basins)
            print("indices", indices)
            print("which converged", which_converged)
            print(which_beta_basins[1], conv_to_beta)
            basins[indices[conv_to_beta]] = which_beta_basins[1]
            print("Basins ", basins)

        # delete indices that converged
        indices = np.delete(indices, which_converged)
        y_vals = np.delete(y_vals, which_converged, axis=0)
        print("indices didn't converge: ", indices)

        # the rest are continued increasing the t_span accordingly.
        numb_pts = len(indices)
        y0 = np.ravel(y_vals, order="C")
        t_span = (t_span[1], t_span[1] + t_inc)
        numb_tries += 1

    # NOTE(review): this check is only reached after every point has converged, so it can
    #  never raise while points remain unconverged — the loop above has no iteration cap;
    #  confirm whether the check was meant to live inside the loop.
    if numb_tries == max_tries:
        raise RuntimeError(f"No convergence in normalized_gradient path pt {y0},"
                           f" solution {sol['y'][:, -1]}, t_span {t_span}")
    # input("sds")
    return basins
+
+
def solve_for_isosurface_pt(
    l_bnd, u_bnd, maxima, cart_sphere_pt, density_func, iso_val, iso_err
):
    r"""
    Solves for the point on a ray that satisfies the isosurface value equation.

    .. math::
        f(r) := \rho(\textbf{A} + r \textbf{\theta}) - c,

    where A is the position of the atom, :math:`\theta` is the Cartesian coordinates of the
    point on the sphere, r is the radius, and c is the isosurface value. The radius
    is solved using a root-finding algorithm over an interval that contains the isosurface
    value. If the initial interval does not bracket the isosurface value, the bounds are
    expanded iteratively (up to a fixed number of expansions) until they do.

    Parameters
    ----------
    l_bnd: float
        The lower-bound on the radius for the root-solver. The density there needs to be
        greater than the isosurface value; if not, the bound is shrunk until it is.
    u_bnd: float
        The upper-bound on the radius for the root-solver. The density there needs to be
        less than the isosurface value; if not, the bound is extended until it is.
    maxima: ndarray(3,)
        The maximum of the atom.
    cart_sphere_pt: ndarray(3,)
        The Cartesian coordinates of the point on the sphere.
    density_func: callable(ndarray(M,3), ndarray(M,))
        The electron density function.
    iso_val: float
        The isosurface value.
    iso_err: float
        The xtol for the root-solver.

    Returns
    -------
    ndarray(3,):
        The point :math:`\textbf{A} + r \textbf{\theta}` that satisfies the isosurface value.

    Raises
    ------
    ValueError
        If a valid bracket could not be found after the maximum number of expansions.

    """
    # A valid bracket has density(l_bnd) > iso_val > density(u_bnd), since the density
    # decays along the ray. The previous implementation adjusted each bound only once by
    # a fixed 1.5 (and only one bound when both were invalid), which is not always
    # enough; expand iteratively instead, with a cap to avoid an infinite loop.
    for _ in range(100):
        if density_func(np.array([maxima + u_bnd * cart_sphere_pt]))[0] < iso_val:
            break
        u_bnd += 1.5
    else:
        raise ValueError(f"Could not find an upper bound below the isosurface value {iso_val}.")
    for _ in range(100):
        if density_func(np.array([maxima + l_bnd * cart_sphere_pt]))[0] > iso_val:
            break
        # Shrink toward the maxima while keeping the radius positive.
        l_bnd = l_bnd - 1.5 if l_bnd > 1.5 else l_bnd / 2.0
    else:
        raise ValueError(f"Could not find a lower bound above the isosurface value {iso_val}.")

    # Use Root-finding algorithm to find the isosurface point.
    root_func = lambda t: density_func(np.array([maxima + t * cart_sphere_pt]))[0] - iso_val
    sol = root_scalar(root_func, method="toms748", bracket=(l_bnd, u_bnd), xtol=iso_err)
    assert sol.converged, f"Root function did not converge {sol}."
    bnd_pt = maxima + sol.root * cart_sphere_pt
    return bnd_pt
+
+
def solve_for_basin_bnd_pt(
    dens_cutoff, i_maxima, maximas, radial, cart_sphere_pt, dens_func, grad_func, bnd_err,
    iso_val, beta_spheres
):
    r"""
    Find the point on a ray from maxima `i_maxima` that intersects the basin boundary (IAS).

    Returns
    -------
    (bnd_pt, is_ray_to_inf, index_iso, found_watershed_on_ray, basin_id)
        `bnd_pt` is the boundary point on the ray (None if the ray goes to infinity),
        `is_ray_to_inf` is True when every point on the initial ray converged to
        `i_maxima`, `index_iso` is the index of the ray point whose density is closest
        to `iso_val` (only set for rays to infinity), `found_watershed_on_ray` states
        whether a boundary point was found, and `basin_id` is the basin the ray crosses
        into.

    """
    # Construct the ray and compute its density values based on a maxima defined by angles
    # `cart_sphere_pt` with radial pts `rad_pts`. It goes through each point on the ray
    # if the ray density value is greater than dens_cutoff, then it is likely this ray
    # tends towards infinity and has no basin boundary. If density value is larger, then
    # it solves for the normalized_gradient path via solving normalized_gradient ode. If this ode solution,
    # is close to other basins, then we found when it switched basins. Then we take
    # the two points where it switches basin and compute the distance, if this distance
    # is less than `bnd_err`, then we take the midpoint to be the boundary point on the ray
    # that intersects the ias. If not, then we construct a new ray with different l_bnd
    # and u_bnd and reduce the step-size further and repeat this process.
    rad_pts = radial.copy()
    ss_ray = np.mean(np.diff(rad_pts)) # Stay with a coarse ray then refine further.
    index_iso = None # Needed to refine if the ray tends towards infinity.
    bnd_pt = None # Boundary or Isosurface Point
    is_ray_to_inf = False # Does this ray instead go towards infinity

    found_watershed_on_ray = False
    is_watershed_pt = False
    basin_id = None
    counter = 0
    while not found_watershed_on_ray:
        ray = maximas[i_maxima] + rad_pts[:, None] * cart_sphere_pt
        ray_density = dens_func(ray)
        print("Start of Ray ", ray[0], " Cartesian pt of Sphere ", cart_sphere_pt, "Final Ray Pt: ",
              ray[-1])

        # Cut off ray points that are less than dens_cutoff.
        ray_cutoff = ray_density < dens_cutoff
        ray = np.delete(ray, ray_cutoff, axis=0)
        ray_density = np.delete(ray_density, ray_cutoff)

        # print("The Ray", ray)
        # print("The Ray Density ", ray_density)
        grad_norm = np.min(np.linalg.norm(grad_func(ray), axis=1))
        print("Range ", min(max(0.5 / grad_norm, 10), 50))

        import time
        start = time.time()
        # basins = gradient_path_all_pts(
        #     ray, grad_func, beta_spheres, i_maxima, maximas,
        #     t_span=(0, min(max(0.5 / grad_norm, 10), 50)),
        #     max_step=np.inf,
        #     first_step=1e-6,
        #     method="LSODA",
        #     rtol=bnd_err,
        #     atol=1e-6,
        #     is_watershed_pt=is_watershed_pt
        # )
        # Back-trace every ray point to the maxima/basin it belongs to.
        basins = steepest_ascent_rk45(
            ray, dens_func, grad_func, beta_spheres, maximas, tol=1e-6
        )
        final = time.time()
        print("Difference ", final - start)
        print("basins ", basins)

        # If they all converged to the same basins, then it is ray to infinity.
        if np.all(basins == i_maxima):
            if counter == 0:
                is_ray_to_inf = True
                index_iso = np.argsort(np.abs(ray_density - iso_val))[0]
                print("Ray to infinity with index ", index_iso)
                break
            else:
                raise ValueError("Went from intersecting basin boundary to classifying as ray to inf.")
        elif len(np.unique(basins)) == 1:
            raise ValueError(f"Went from intersecting basin boundary to classifying as ray to inf."
                             f"{basins}.")

        # if some converged to other basins then refine further
        # first, find which points it switched from basin 1 to basin 2.
        i_switch = np.argmin(basins == i_maxima)
        # print("i_switch", i_switch)
        dist = np.linalg.norm(ray[i_switch - 1, :] - ray[i_switch, :])
        # print("Dist ", dist, np.linalg.norm(rad_pts[i_switch - 1] - rad_pts[i_switch]))

        # If the distance between the two points is less than bnd_err, then stop else refine.
        # NOTE(review): `np.abs(dist - bnd_err) < 1e-8` requires the refined step to land
        #  almost exactly on bnd_err — confirm the `np.arange` refinement below guarantees this.
        if np.abs(dist - bnd_err) < 1e-8:
            # Take the midpoint to be the boundary point.
            found_watershed_on_ray = True
            bnd_pt = (ray[i_switch] + ray[i_switch - 1]) / 2.0
            basin_id = basins[i_switch] # basins starts at 1
            print("Found the Watershed point ", bnd_pt, basin_id)
        else:
            # Refine Grid Further
            l_bnd = np.linalg.norm(ray[i_switch - 1] - maximas[i_maxima]) if i_switch != 0 else rad_pts[0] - 1e-3
            u_bnd = np.linalg.norm(ray[i_switch] - maximas[i_maxima])
            ss_ray = max(ss_ray / 10.0, bnd_err) # Decrease step-size.
            rad_pts = np.arange(l_bnd, u_bnd + ss_ray, ss_ray)
            print("Refine the ray further with l_bnd, u_bnd, ss: ", l_bnd, u_bnd, ss_ray, rad_pts[-1])
            is_watershed_pt = True # Update that it is a watershed point
        counter += 1 # increment counter so that it doesn't check if entire ray goes to infinity.
        # input("Refine further")
    return bnd_pt, is_ray_to_inf, index_iso, found_watershed_on_ray, basin_id
+
+
def _optimize_centers(centers, grad_func):
    """Refine each center by following the density gradient to the actual local maximum.

    Raises RuntimeError when two refined maximas coincide (closer than 1e-6).
    """
    refined = []
    for center in centers:
        refined.append(
            gradient_path(center, grad_func, t_span=(0, 10), method="BDF",
                          first_step=1e-9, max_step=1e-1)
        )
    maximas = np.array(refined, dtype=np.float64)
    print("New maximas: \n ", maximas)
    # Duplicate maximas indicate two centers collapsed onto the same maximum.
    pairwise = cdist(maximas, maximas)
    pairwise[np.diag_indices(len(maximas))] = 1.0  # ignore self-distances on the diagonal
    if np.any(pairwise < 1e-6):
        raise RuntimeError(f"Optimized maximas contains duplicates: \n {maximas}.")
    return maximas
+
+
def determine_beta_spheres(beta_spheres, maximas, radial_grid, angular_pts, dens_func, grad_func):
    r"""
    Determine the beta-sphere radius of each maxima.

    For each maxima, spheres of increasing radius (taken from its radial grid) are
    sampled at `angular_pts` and every sphere point is back-traced with
    `steepest_ascent_rk45`. The largest radius whose sphere points all converge back to
    that maxima is taken as its beta-sphere radius.

    Notes
    -----
    This assumes the initial beta-sphere is 0.01, and so the distance between maximas
    cannot be smaller than this.

    Parameters
    ----------
    beta_spheres : list[float] or None
        Pre-set radii; entries still equal to the initial value (0.01) are determined
        here. If None, all radii are determined.
    maximas : ndarray(M, 3)
        Local maximas of the density.
    radial_grid : list[ndarray]
        Radial grid points of each maxima.
    angular_pts : ndarray(N, 3)
        Angular (unit-sphere) points used to sample each sphere.
    dens_func, grad_func : callable
        Density and gradient functions forwarded to the ODE back-tracer.

    Returns
    -------
    list[float]
        Beta-sphere radius for each maxima (0.01 when no radius could be grown).

    """
    numb_maximas = len(maximas)
    initial_beta_sph = 0.01
    if beta_spheres is None:
        beta_spheres = [initial_beta_sph] * numb_maximas
    # Determine the beta-spheres
    for i_maxima, maxima in enumerate(maximas):
        # Only determine radii that were not supplied by the caller.
        if beta_spheres[i_maxima] == initial_beta_sph:
            optimal_rad = -np.inf
            for rad_pt in radial_grid[i_maxima]:
                if rad_pt > initial_beta_sph:
                    # Determine the points on the sphere with this radius
                    pts = maxima + rad_pt * angular_pts
                    basins = steepest_ascent_rk45(
                        pts, dens_func, grad_func
                    )
                    # `np.int` was removed in NumPy 1.24; use the builtin `int` instead.
                    basins = np.array(basins, dtype=int)
                    # If all the basins went to same maxima, then update radius
                    # else then break out of this for loop.
                    if np.all(basins == i_maxima):
                        optimal_rad = rad_pt
                        beta_spheres[i_maxima] = optimal_rad
                    else:
                        break
    return beta_spheres
+
+
def qtaim_surface(angular, centers, dens_func, grad_func, iso_val=0.001,
                  dens_cutoff=1e-5, bnd_err=1e-4, iso_err=1e-6,
                  beta_spheres=None, optimize_centers=True, refine=False):
    r"""
    Find the outer atomic and inner atomic surface based on QTAIM.

    For each maxima, a sphere is determined based on `angular` and for each
    point on the angular/sphere, a ray is created based on the radial grid `rgrids`.
    Each ray is then determined to either go to infinity and cross the isosurface of
    the electron density, or to intersect the inner-atomic surface (IAS) of another
    basin. This is determined for each point on the sphere.

    Parameters
    ----------
    angular: List[int] or ndarray(N, 3)
        Either integer specifying the degree to construct angular/Lebedev grid around each maxima
        or array of points on the sphere in Cartesian coordinates.
    centers: ndarray(M,3)
        List of local maximas of the density.
    dens_func: Callable(ndarray(N,3) -> ndarray(N,))
        The density function.
    grad_func: Callable(ndarray(N,3) -> ndarray(N,3))
        The gradient of the density function.
    iso_val: float
        The isosurface value of the outer atomic surface.
    dens_cutoff: float
        Points on the ray whose density is less than this cutoff are ignored.
    bnd_err: float
        This determines the accuracy of points on the inner atomic surface (IAS) by controlling
        the step-size of the ray that cross the IAS.
    iso_err: float
        The error associated to points on the OAS and how close they are to the isosurface value.
    beta_spheres : list[float]
        List of size `M` of radius of the sphere centered at each maxima. It avoids backtracing
        of points within the circle. If None is provided, then beta-sphere is determined
        computationally.
    optimize_centers: bool
        If true, then it will optimize the centers/maximas to get the exact local maximas.
    refine : (bool, int)
        If true, then additional points between the IAS and OAS are constructed, added and
        solved for whether it is on the IAS or OAS. If an integer, only that maxima is
        solved (used internally by the refinement recursion).

    Returns
    -------
    SurfaceQTAIM
        Class that contains the inner-atomic surface, outer-atomic surface for each maxima.

    Notes
    -----
    - It is possible for a ray to intersect the zero-flux surface but this algorithm will
      classify it as a ray to infinity, because the points on the other side of the basin
      have density values so small that the ODE doesn't converge to the maxima of the
      other basin. In this scenario it might be worthwhile to have a denser radial grid
      with fewer points away from infinity, or a smaller density cut-off. An alternative
      for the developer is to implement a highly accurate ODE solver at the expense of
      computation time.

    """
    if not isinstance(refine, (bool, int)):
        raise TypeError(f"Refine {type(refine)} should be either boolean or integer.")
    if dens_cutoff > iso_val:
        raise ValueError(f"Density cutoff {dens_cutoff} is greater than isosurface val {iso_val}.")
    if beta_spheres is not None and len(centers) != len(beta_spheres):
        raise ValueError(
            f"Beta sphere length {len(beta_spheres)} should match the"
            f" number of centers {len(centers)}"
        )

    # Using centers, update to the maximas
    maximas = centers
    if optimize_centers:
        # Using ODE solver to refine the maximas further.
        maximas = _optimize_centers(maximas, grad_func)

    # Construct a radial grid for each atom by taking distance to the closest five atoms.
    # Added an extra padding in the case of carbon in CH4
    # TODO: the upper-bound should depend on distance to isosurface value and distance
    #  between atoms
    dist_maxs = cdist(maximas, maximas)
    distance_maximas = np.sort(dist_maxs, axis=1)[:, min(5, maximas.shape[0] - 1)]
    print(cdist(maximas, maximas))
    print(distance_maximas + 5.0)
    ss0 = 0.23
    radial_grid = [
        np.arange(0.2, x + 5.0, ss0) for x in distance_maximas
    ]
    # (Removed leftover `input("Hello")` debug pause that blocked execution on stdin.)

    numb_maximas = len(maximas)
    angular_pts = AngularGrid(degree=angular).points if isinstance(angular, int) else angular
    r, thetas, phis = convert_cart_to_sph(angular_pts).T
    numb_ang_pts = len(thetas)

    # Determine beta-spheres from a smaller angular grid
    # Degree can't be too small or else the beta-radius is too large and an IAS point got
    # classified as an OAS point
    ang_grid = AngularGrid(degree=10)
    # TODO: Do the spherical trick then do the beta-sphere
    if beta_spheres is None:
        beta_spheres = determine_beta_spheres(
            beta_spheres, maximas, radial_grid, ang_grid.points, dens_func, grad_func
        )
    beta_spheres = np.array(beta_spheres)
    # Check beta-spheres are not intersecting
    condition = dist_maxs <= beta_spheres[:, None] + beta_spheres
    condition[range(len(maximas)), range(len(maximas))] = False  # Diagonal always true
    if np.any(condition):
        raise ValueError(f"Beta-spheres {beta_spheres} overlap with one another.")

    r_func = [np.zeros((numb_ang_pts,), dtype=np.float64) for _ in range(numb_maximas)]
    oas = [[] for _ in range(numb_maximas)]  # outer atomic surface
    ias = [[] for _ in range(numb_maximas)]  # inner atomic surface.
    basin_ias = [[] for _ in range(numb_maximas)]  # basin ids for inner atomic surface.
    refined_ang = [] if refine else None
    maxima_to_do = range(0, numb_maximas) if type(refine) == type(True) else [refine]  # refining
    for i_maxima, maxima in enumerate(maximas):
        # Maximas aren't usually large, so doing this is okay. Quick fix to use refinement without
        # re-writing this function into separate functions.
        if i_maxima in maxima_to_do:
            print("Start: Maxima ", maxima)

            # First classify points as either watershed/IAS or isosurface/OAS
            # Each angular point would have a different radial grid associated with it.
            radial = radial_grid[i_maxima][radial_grid[i_maxima] >= beta_spheres[i_maxima]]
            ias_indices = []
            ias_basin = []
            ias_radius = []  # Radius to start at
            indices_to_classify = np.arange(len(angular_pts))
            for i_rad in range(0, len(radial)):  # Go through each radial shell
                # Construct points on the angular points that aren't classified yet.
                all_points = maxima + radial[i_rad, None] * angular_pts[indices_to_classify, :]

                start = time.time()
                basins = steepest_ascent_rk45(
                    all_points, dens_func, grad_func
                )
                final = time.time()
                print("Basins", basins)
                print("Difference ", final - start)

                # Get indices of where they went to a different basin
                basin_switch_ind_local = np.where(basins != i_maxima)[0]
                watershed_indices = indices_to_classify[basin_switch_ind_local]
                print("Global indices (Points) that needs to be refined", watershed_indices)

                # If some points went to a different basin, then record them as IAS
                indices_to_classify = np.delete(indices_to_classify, basin_switch_ind_local)
                ias_indices += list(watershed_indices)
                ias_basin += list(basins[basin_switch_ind_local])
                # max(i_rad - 1, 0): at the first shell, `radial[i_rad - 1]` would wrap
                # around to the largest radius, corrupting the starting radius.
                ias_radius += [radial[max(i_rad - 1, 0)]] * len(basin_switch_ind_local)

            # Rest of the points are OAS
            oas_indices = indices_to_classify

            # Sort the IAS points
            indices = np.argsort(ias_indices)
            ias_indices = np.array(ias_indices, dtype=int)[indices]
            ias_basin = np.array(ias_basin, dtype=int)[indices]
            ias_radius = np.array(ias_radius)[indices]

            print("IAS indices ", ias_indices)
            print("IAS basins", ias_basin)
            print("OAS indices ", oas_indices)

            # Solve for the watershed/IAS points.
            all_ss = np.array([ss0 / 10.0] * len(ias_indices))  # Step-size of each watershed point
            all_ss = np.fmax(all_ss, bnd_err)
            ias_radius = np.array(ias_radius) + all_ss  # increase by ss
            indices_to_solve = ias_indices.copy()
            while len(indices_to_solve) != 0:
                print("Indices to solve watershed ", indices_to_solve)
                print("Current step-size ", all_ss)
                # Construct points on the angular points that aren't classified yet.
                all_points = maxima + ias_radius[:, None] * angular_pts[indices_to_solve, :]

                # If the density values is less than the isosurface value, then the point
                # should have been classified as a OAS
                dens_vals = dens_func(all_points)
                print("Density Values ", dens_vals)
                dens_small_ind = np.where(dens_vals < iso_val)[0]
                if len(dens_small_ind) != 0:
                    print("(Local) Indices where density is small ", dens_small_ind)
                    print("(Global) indices where density is small ", indices_to_solve[dens_small_ind])
                    print("Before ias_indices ", ias_indices)
                    # Remove (globally) from ias_indices, add to oas_indices
                    is_in = np.isin(ias_indices, indices_to_solve[dens_small_ind])
                    oas_indices = np.hstack((oas_indices, ias_indices[np.where(is_in)[0]]))
                    ias_indices = ias_indices[np.where(~is_in)[0]]
                    ias_basin = ias_basin[np.where(~is_in)[0]]
                    # Delete from local information
                    indices_to_solve = np.delete(indices_to_solve, dens_small_ind)
                    ias_radius = np.delete(ias_radius, dens_small_ind)
                    all_ss = np.delete(all_ss, dens_small_ind)
                    all_points = np.delete(all_points, dens_small_ind, axis=0)

                # Calculate basins of all of these points
                start = time.time()
                basins = steepest_ascent_rk45(
                    all_points, dens_func, grad_func, beta_spheres, maximas, tol=1e-7, max_ss=0.5, ss_0=0.23
                )
                final = time.time()
                print("Basins Assigned ", basins)
                print("Difference ", final - start)
                # Get indices of where they went to a different basin
                basin_switch_ind_local = np.where(basins != i_maxima)[0]
                print("Global indices (Points) that needs to be refined: ", indices_to_solve[basin_switch_ind_local])

                # If basins are different, make sure they match the correct basins
                if len(basin_switch_ind_local) != 0:
                    basins_vals = basins[basin_switch_ind_local]
                    print("Basin_vals that switched ", basins_vals)
                    print("Local Indices that switched ", basin_switch_ind_local)
                    print("IAS indices ", ias_indices)
                    print("IAS basins", ias_basin)
                    print("Actual indices ", np.where(np.in1d(ias_indices, indices_to_solve[basin_switch_ind_local]))[0])
                    original_basins = ias_basin[np.where(np.in1d(ias_indices, indices_to_solve[basin_switch_ind_local]))[0]]
                    print("Original Basins ", original_basins)
                    # if np.any(basins_vals != original_basins):
                    #     raise ValueError(f"Basin switched")

                # Check convergence of watershed points that switch to different basin
                watershed_conv_ind = np.where(all_ss <= bnd_err)[0]
                print(all_ss[basin_switch_ind_local], bnd_err, all_ss[basin_switch_ind_local] <= bnd_err)
                indices_conv = indices_to_solve[watershed_conv_ind]
                if len(indices_conv) != 0:
                    print("Global Indices that converged ", indices_conv)
                    # Get the boundary points:
                    radius_bnd_pts = (2.0 * ias_radius[watershed_conv_ind] + all_ss[watershed_conv_ind]) / 2.0
                    # NOTE(review): `angular_pts` is indexed here with the *local*
                    #  `watershed_conv_ind` rather than the global `indices_conv`; the
                    #  stored radius is unaffected (angular points are unit vectors,
                    #  so the norm below equals `radius_bnd_pts`), but confirm intent.
                    bnd_pts = maxima + radius_bnd_pts[:, None] * angular_pts[watershed_conv_ind]

                    # Store the result:
                    r_func[i_maxima][indices_conv] = np.linalg.norm(bnd_pts - maxima, axis=1)
                    [ias[i_maxima].append(x) for x in indices_conv]
                    # NOTE(review): `original_basins` is recomputed from
                    #  `basin_switch_ind_local`, not from `watershed_conv_ind` — the
                    #  appended basin ids may not correspond to `indices_conv`; confirm.
                    original_basins = ias_basin[np.where(np.in1d(ias_indices, indices_to_solve[basin_switch_ind_local]))[0]]
                    [basin_ias[i_maxima].append(x) for x in original_basins]

                    # Delete to avoid for the next iteration
                    indices_to_solve = np.delete(indices_to_solve, watershed_conv_ind)
                    ias_radius = np.delete(ias_radius, watershed_conv_ind)
                    all_ss = np.delete(all_ss, watershed_conv_ind)
                    basins = np.delete(basins, watershed_conv_ind)

                # The ones that didn't converge, adjust its radius and step-size
                basin_same_ind_local = np.where(basins == i_maxima)[0]
                basin_switch_ind_local = np.where(basins != i_maxima)[0]
                # The ones that basins didn't switch, take a step with step-size
                # if it reached upper-bound then a problem occurred.
                ias_radius[basin_same_ind_local] += all_ss[basin_same_ind_local]
                # TODO: Add a upper-bound check
                # The ones that basins switched, take a step-back and adjust step-size
                # adjust upper-bound to be the current point.
                ias_radius[basin_switch_ind_local] -= all_ss[basin_switch_ind_local]
                all_ss[basin_switch_ind_local] = np.fmax(all_ss[basin_switch_ind_local] / 10.0, bnd_err)

                print("\n")

            # Solve for the root of each OAS indices
            for i_oas in oas_indices:
                # Construct upper and lower bound of the isosurface equation
                ang_pt = angular_pts[i_oas]
                iso_eq = np.abs(dens_func(maxima + ang_pt * radial[:, None]) - iso_val)
                i_iso = np.argsort(iso_eq)[0]
                # i_iso > 0 (not >= 0, which is always true): when the closest shell is
                # the first one, `radial[i_iso - 1]` would wrap to the largest radius.
                l_bnd = radial[i_iso - 1] if i_iso > 0 else radial[i_iso] / 2.0
                u_bnd = radial[i_iso + 1] if i_iso + 1 < len(radial) else radial[i_iso] * 2.0
                # Solve for the isosurface point
                oas_pt = solve_for_isosurface_pt(
                    l_bnd, u_bnd, maxima, angular_pts[i_oas], dens_func, iso_val, iso_err
                )
                # Record them
                r_func[i_maxima][i_oas] = np.linalg.norm(oas_pt - maxima)
                oas[i_maxima].append(i_oas)

            if type(refine) == type(True) and refine:  # refine can be integer, so this ignores it.
                # Take convex hull between ias and oas and construct additional points in that region.
                # `new_pts` is concatenated to angular grids and is in cartesian coordinates.
                print("IAS ", ias[i_maxima])
                print("OAS", oas[i_maxima])
                new_pts = construct_points_between_ias_and_oas(
                    ias[i_maxima], oas[i_maxima], angular_pts, r_func[i_maxima], maxima
                )
                print("new pts ", new_pts, np.linalg.norm(new_pts, axis=1))
                # Re-do this qtaim algorithm only on this center
                refined_qtaim = qtaim_surface(new_pts, maximas, dens_func,
                                              grad_func, iso_val, dens_cutoff,
                                              bnd_err, iso_err, beta_spheres=beta_spheres,
                                              optimize_centers=False, refine=i_maxima)
                print("Refined", refined_qtaim.ias, refined_qtaim.oas)
                # Update this basin's result from the refined, + numb_ang_pts: corrects indices
                ias[i_maxima] += [x + numb_ang_pts for x in refined_qtaim.ias[i_maxima]]
                oas[i_maxima] += [x + numb_ang_pts for x in refined_qtaim.oas[i_maxima]]
                basin_ias[i_maxima] += refined_qtaim.basins_ias[i_maxima]
                refined_ang.append(new_pts)
                print(refined_qtaim.r_func, r_func[i_maxima].shape)
                r_func[i_maxima] = np.hstack((r_func[i_maxima], refined_qtaim.r_func[i_maxima]))

        print("\n")
    # NOTE(review): chemtools.topology.surface.SurfaceQTAIM's __init__ takes
    #  (r_func, angular_degs, maximas, indices_maxima, oas, ias, basins_ias, iso_val,
    #  beta_spheres); this call passes 7 positional args — confirm which SurfaceQTAIM
    #  this deprecated module is meant to target.
    return SurfaceQTAIM(r_func, angular, maximas, oas, ias, basin_ias, refined_ang)
+
+
diff --git a/chemtools/topology/surface.py b/chemtools/topology/surface.py
new file mode 100644
index 00000000..5a405047
--- /dev/null
+++ b/chemtools/topology/surface.py
@@ -0,0 +1,547 @@
+# -*- coding: utf-8 -*-
+# ChemTools is a collection of interpretive chemical tools for
+# analyzing outputs of the quantum chemistry calculations.
+#
+# Copyright (C) 2016-2019 The ChemTools Development Team
+#
+# This file is part of ChemTools.
+#
+# ChemTools is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# ChemTools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see
+#
+# --
+r"""
+Data structure that holds the outer-atomic (OAS) and inter-atomic (IAS) surfaces.
+
+Can be used for
+- analyzing the IAS and OAS.
+- integration over basins.
+"""
+from chemtools.topology.utils import solve_for_oas_points, solve_intersection_of_ias_point, solve_intersection_of_ias_point_hirshfeld
+
+import matplotlib.pyplot as plt
+from mpl_toolkits import mplot3d
+import numpy as np
+from scipy.spatial import ConvexHull, Delaunay, KDTree
+from scipy.spatial.distance import cdist
+
+from grid.angular import AngularGrid
+from grid.atomgrid import AtomGrid
+from grid.basegrid import Grid
+
+__all__ = ["SurfaceQTAIM"]
+
+
class SurfaceQTAIM:
    r"""
    Container for a QTAIM basin-surface result: per-basin radial surface
    functions and angular degrees, and the split of each basin's surface points
    into outer-atomic (OAS) and inter-atomic (IAS) pieces.
    """

    def __init__(self, r_func, angular_degs, maximas, indices_maxima, oas, ias, basins_ias, iso_val, beta_spheres):
        # Per-basin radial distances r_j(theta_i, phi_i) from the maxima to the surface.
        self._r_func = r_func
        # ndarray(M, 3) of basin maxima coordinates.
        self._maximas = maximas
        # Indices of the centers corresponding to the rows of `maximas`.
        self._indices_maxima = indices_maxima
        # Angular (Lebedev) degree(s): a single int or one per basin.
        self._angular_degs = angular_degs
        # Per-basin indices of angular points on the outer atomic surface.
        self._oas = oas
        # Per-basin indices of angular points on the inter-atomic surface.
        self._ias = ias
        # For each IAS point of each basin, the index of the neighbouring basin it borders.
        self._basins_ias = basins_ias
        # Electron-density isosurface value that defines the OAS.
        self._iso_val = iso_val
        # Per-maxima beta-sphere radii.
        self._beta_spheres = beta_spheres
+
    @property
    def beta_spheres(self):
        # List[float] : Beta-sphere radius per maxima; points within this radius are
        # taken to converge to that maxima.
        return self._beta_spheres

    @property
    def iso_val(self):
        # float : Isosurface (electron density) value of the outer atomic surface.
        return self._iso_val

    @property
    def r_func(self):
        # List[M, np.ndarray(N_i,)] ::math:`r_j(\theta_i, \phi_i)` for each M basins and N_i
        # angular points. Length matches length of `maximas`.
        return self._r_func

    @property
    def oas(self):
        # List[List[int]] : First list is over basins, second over indices of points of outer-atomic
        # surface. Length matches length of `indices_maxima`.
        return self._oas

    @property
    def ias(self):
        # List[List[int]] : First list is over basins, second over indices of points of inter-atomic
        # surface. Length matches length of `indices_maxima`.
        return self._ias

    @property
    def maximas(self):
        # ndarray(M, 3) : The maxima of all basins.
        return self._maximas

    @property
    def indices_maxima(self):
        # list[int] : List of indices of `centers` that correspond to each row of `maximas`.
        return self._indices_maxima

    @indices_maxima.setter
    def indices_maxima(self, value):
        # Setter used by `__add__` to record the union of maxima indices.
        self._indices_maxima = value

    @property
    def angular_degs(self):
        # int or List[int] : Angular degree(s) over each basin. Length matches length of
        # `indices_maxima` when given as a list.
        return self._angular_degs

    @property
    def rad_grids(self):
        # List[OneDGrid] : One radial grid per basin for integrating over [0, \infty).
        # NOTE(review): `_rad_grids` is never assigned in `__init__`; accessing this
        # property raises AttributeError unless it is set externally — confirm usage.
        return self._rad_grids

    @property
    def basins_ias(self):
        # List[List[int]] : For each IAS point of each basin, the index of the
        # neighbouring basin it borders.
        return self._basins_ias
+
+ def __add__(self, other):
+ if np.abs(self.iso_val - other.iso_val) > 1e-8:
+ raise RuntimeError(f"Isosurface value should match each other when combing surface objects.")
+ object = SurfaceQTAIM(
+ self.r_func, self.angular_degs, self.maximas, self.indices_maxima, self.oas, self.ias,
+ self.basins_ias, self.iso_val, self.beta_spheres
+ )
+ object.indices_maxima = sorted(list(set(self.indices_maxima).union(set(other.indices_maxima))))
+ for i_replace in other.indices_maxima:
+ object.ias[i_replace] = other.ias[i_replace]
+ object.oas[i_replace] = other.oas[i_replace]
+ object.basins_ias[i_replace] = other.basins_ias[i_replace]
+ object.angular_degs[i_replace] = other.angular_degs[i_replace]
+ object.r_func[i_replace] = other.r_func[i_replace]
+ return object
+
+ def save(self, filename):
+ save_dict = {
+ "ias": np.array(self.ias, dtype=object),
+ "oas": np.array(self.oas, dtype=object),
+ "basin_ias": np.array(self.basins_ias, dtype=object),
+ "maximas": np.array(self.maximas),
+ "indices_maxima": np.array(self.indices_maxima),
+ "angular_degs": np.array(self.angular_degs),
+ "r_func": np.array(self.r_func, dtype=object),
+ "iso_val": self.iso_val,
+ "beta_spheres": np.array(self.beta_spheres, dtype=float)
+ }
+ np.savez(filename + ".npz", **save_dict, allow_pickle=True)
+
+ def generate_angular_grid_of_basin(self, i_basin, deg=None):
+ # Note this doesn't include the extra angular points generated by refinement.
+ if deg is not None:
+ deg = deg
+ else:
+ deg = self.angular_degs
+ deg = deg[i_basin] if isinstance(deg, (list, np.ndarray)) else deg
+ return AngularGrid(degree=deg, method="spherical")
+
+ def generate_angular_pts_of_basin(self, i_basin, deg=None):
+ angular_grid = self.generate_angular_grid_of_basin(i_basin, deg)
+ points = angular_grid.points
+ return points
+
+ def get_atom_grid_over_basin(self, i_basin, rgrid=None):
+ # integrate over a basin.
+ deg = self.angular_degs
+ deg = deg[i_basin] if isinstance(deg, list) else deg
+ rgrid = self.rad_grids if rgrid is None else rgrid
+ # If rgrid is a list for every center, then obtain the right one. Else is it is OneDGrid.
+ if isinstance(rgrid, list):
+ if len(rgrid) > 1:
+ rgrid = self.rad_grids[i_basin]
+ else:
+ rgrid = rgrid[0]
+ atom_grid = AtomGrid(rgrid, degrees=[deg], center=self.maximas[i_basin], method="spherical")
+
+ ias_indices_a = self.ias[i_basin]
+ r_limits = self.r_func[i_basin][ias_indices_a]
+
+ # Holds indices of each point on the angular grid, where the radial points should be zero afterwards.
+ ias_indices, rad_indices = np.where(atom_grid.rgrid.points[None, :] > r_limits[:, None])
+ start_indices = atom_grid.indices[rad_indices] # Get the radial shell that includes the index rad_indices.
+ ias_indices = np.array(ias_indices, dtype=int)
+ start_indices = np.array(start_indices, dtype=int)
+ indices_zero = np.array(start_indices + np.array(ias_indices_a, dtype=int)[ias_indices], dtype=int)
+ atom_grid.weights[indices_zero] = 0.0
+
+ indices_zero = np.where(atom_grid.weights == 0.0)[0]
+ points = np.delete(atom_grid.points, indices_zero, axis=0)
+ weights = np.delete(atom_grid.weights, indices_zero)
+
+ return Grid(points, weights)
+
    def generate_pts_on_surface(self, i_basin, include_other_surfaces=False):
        r"""
        Generate the Cartesian points on the surface of an atom/basin.

        Parameters
        ----------
        i_basin : int
            Which basin to generate the surface for.
        include_other_surfaces : bool
            If True, also add IAS points from other basins (other than `i_basin`)
            whose recorded neighbour is `i_basin`. This adds more points to the
            surface and triggers de-duplication.

        Returns
        -------
        ndarray(N, 3):
            3D-coordinates of each point on the surface.

        """
        sph_pts = self.generate_angular_pts_of_basin(i_basin)
        if len(self.r_func[i_basin]) != 0:
            points = self.maximas[i_basin] + self.r_func[i_basin][:, None] * sph_pts
        else:
            # No surface points recorded for this basin.
            points = np.empty((0, 3), dtype=np.float64)
        if include_other_surfaces:
            if i_basin < 0.0:
                raise ValueError(f"Basin Index {i_basin} cannot be negative here.")

            for i in range(len(self.maximas)):
                if i != i_basin:
                    # IAS points of basin `i` whose recorded neighbour basin is `i_basin`.
                    indices = np.where(np.abs(i_basin - np.array(self.basins_ias[i])) < 1e-10)[0]
                    if len(indices) != 0:
                        ias_indices = np.array(self.ias[i])[indices]
                        sph_pts = self.generate_angular_pts_of_basin(i)
                        new_pts = self.maximas[i] + self.r_func[i][ias_indices, None] * sph_pts[ias_indices]
                        points = np.vstack((points, new_pts))
            # De-duplicate coincident points, preserving first-seen order.
            # NOTE(review): rounding to 16 decimals is essentially a no-op for
            # float64 — confirm whether a coarser tolerance was intended here.
            points, indices = np.unique(np.round(points, 16), axis=0, return_index=True)
            return points[np.argsort(indices), :]
        return points
+
    def get_ias_pts_of_basin(self, i_basin, include_other_surfaces=False):
        r"""Return Cartesian coordinates of the IAS points of basin `i_basin`.

        If `include_other_surfaces` is True, IAS points of other basins whose
        recorded neighbour is `i_basin` are appended and coincident points are
        removed (first-seen order preserved).
        """
        ias = self.ias[i_basin]
        sph_pts = self.generate_angular_pts_of_basin(i_basin)
        points = self.maximas[i_basin] + self.r_func[i_basin][ias, None] * sph_pts[ias, :]
        if include_other_surfaces:
            if i_basin < 0.0:
                raise ValueError(f"Basin Index {i_basin} cannot be negative here.")

            for i_other in range(len(self.maximas)):
                if i_other != i_basin and i_other in self.indices_maxima:
                    # IAS points of `i_other` whose recorded neighbour basin is `i_basin`.
                    indices = np.where(np.abs(i_basin - np.array(self.basins_ias[i_other])) < 1e-10)[0]
                    if len(indices) != 0:
                        ias_indices = np.array(self.ias[i_other])[indices]
                        sph_pts = self.generate_angular_pts_of_basin(i_other)
                        new_pts = self.maximas[i_other] + self.r_func[i_other][ias_indices, None] * sph_pts[ias_indices]
                        points = np.vstack((points, new_pts))
            # De-duplicate coincident points, preserving first-seen order.
            points = points.astype(float)
            points, indices = np.unique(np.round(points, 16), axis=0, return_index=True)
            return points[np.argsort(indices), :]
        return points
+
+ def get_oas_pts_of_basin(self, i_basin):
+ oas = self.oas[i_basin]
+ sph_pts = self.generate_angular_pts_of_basin(i_basin)
+ return self.maximas[i_basin] + self.r_func[i_basin][oas, None] * sph_pts[oas]
+
    def get_surface_of_groups_of_atoms(self, atom_indices, tau=0.1, include_other_surfaces=False):
        """Select the surface points of a group of basins by ray-casting from interior centers.

        Parameters
        ----------
        atom_indices : list[int]
            Indices of the basins making up the group.
        tau : float
            Buffer added to the automatically chosen ray radius.
        include_other_surfaces : bool
            Forwarded to `generate_pts_on_surface`.

        Returns
        -------
        (ndarray(N, 3), dict[int, tuple]) :
            The selected surface points and, per atom index, the (np.where)
            indices of its own surface points that were selected.
        """
        # Automatically generate a per-atom ray radius: large enough to reach past
        # the farthest surface point, but not much larger.
        dist = np.max(cdist(self.maximas[atom_indices], self.maximas[atom_indices]), axis=1)
        max_dist = [np.max(self.r_func[i]) for i in atom_indices]
        radius = [dist[i] + max_dist[i] + tau for i in range(len(atom_indices))]

        # Get all points of all surfaces of the group.
        surface_pts = [self.generate_pts_on_surface(i, include_other_surfaces) for i in atom_indices]
        # Offsets [0, l_1, l_1 + l_2, ...], where l_i is the length of surface_pts[i - 1].
        numb_pts_indices = [0] + [sum([len(x) for x in surface_pts[:(i + 1)]]) for i in range(len(atom_indices))]

        points_of_all_surfaces = np.vstack(surface_pts)
        kd_tree = KDTree(points_of_all_surfaces)
        delaunay = Delaunay(points_of_all_surfaces)

        # True once a surface point has been selected (avoids duplicates).
        is_included = np.zeros(len(points_of_all_surfaces), dtype=bool)

        points = np.empty((0, 3), dtype=float)
        for i, i_atom in enumerate(atom_indices):
            # Ray origins: the maxima itself plus one point on each IAS shared
            # with a neighbouring basin.
            all_centers = [self.maximas[i_atom]]
            nbh_basins = np.unique(self.basins_ias[i_atom])
            for i_nbh_basin in nbh_basins:
                # Construct line (pt2 - pt1) t + pt1 between the two basin centers pt1, pt2.
                line = ((self.maximas[i_nbh_basin] - self.maximas[i_atom]) *
                        np.linspace(0.0, 1.0, num=100)[:, None] + self.maximas[i_atom])

                # Find the closest point on the atomic surface to the line.
                dist, indices = kd_tree.query(line, k=1, workers=-1)
                i_dist = np.argmin(dist)
                center_on_IAS = points_of_all_surfaces[indices[i_dist]]

                # Add the new center to all_centers.
                all_centers.append(center_on_IAS)

            # For each center, cast rays over the sphere, discard ray samples that
            # fall inside the convex hull of the surface, then keep the surface
            # point closest to each surviving outside sample.
            for center in all_centers:
                # Radial-by-angular sample points, shape (n, i, 3) before reshape.
                radial_g = np.arange(0.1, radius[i], 0.1)
                ang_pts = self.generate_angular_pts_of_basin(i_atom)
                pts = center[None, None, :] + np.einsum("n,ij->nij", radial_g, ang_pts)
                # TODO: If you remove the reshape, then use argmax to find the closest
                # sample to the surface that is not inside; that would simplify the
                # rest. This is useful if memory becomes an issue.
                pts = pts.reshape((len(radial_g) * len(ang_pts), 3))

                # Remove the samples that are inside the surface triangulation.
                # NOTE(review): `indices` is a boolean mask passed to np.delete —
                # mask semantics require NumPy >= 1.19; confirm minimum version.
                indices = delaunay.find_simplex(pts) >= 0
                pts = np.delete(pts, indices, axis=0)

                # Closest surface point to each remaining sample.
                dist, indices = kd_tree.query(pts, k=1, workers=-1)

                # Skip surface points that were already selected.
                indices = np.delete(indices, is_included[indices] == True)
                is_included[indices] = True
                points = np.vstack((points, points_of_all_surfaces[indices, :]))

        # Map each atom to the indices of its own surface points that were selected.
        indices_per_atom = dict({})
        for i in range(len(atom_indices)):
            is_included_atom = is_included[numb_pts_indices[i]:numb_pts_indices[i + 1]]
            indices_per_atom[atom_indices[i]] = np.where(is_included_atom)

        return points, indices_per_atom
+
    def get_surface_of_groups_of_atoms2(self, atom_indices, include_other_surfaces=False):
        """Return surface points of the union of the basins in `atom_indices`.

        Collects each basin's OAS points plus its IAS points that do not border
        another basin of the group (those boundaries are interior to the union
        and are removed).
        """
        if np.any(np.array(atom_indices) < 0.0):
            raise ValueError(f"Atom indices {atom_indices} cannot be negative here.")

        all_pts = np.empty((0, 3), dtype=float)
        for i_basin in atom_indices:
            # OAS points always belong to the union's surface.
            all_pts = np.vstack((all_pts, self.get_oas_pts_of_basin(i_basin)))

            # IAS points of this basin alone (no other basins' contributions).
            pts_ias = self.get_ias_pts_of_basin(i_basin, False)

            # Remove IAS points that border another selected basin: those are
            # interior to the group, not on its surface.
            basin_ids = self.basins_ias[i_basin]
            indices_remove = np.full(len(pts_ias), False, dtype=bool)
            for i_other_atom in atom_indices:
                if i_basin != i_other_atom:
                    # Indices of this basin's IAS that border `i_other_atom`.
                    indices = np.where(np.abs(i_other_atom - np.array(basin_ids)) < 1e-10)[0]
                    indices_remove[indices] = True
            # NOTE(review): boolean mask passed to np.delete — mask semantics
            # require NumPy >= 1.19; confirm minimum supported version.
            pts_ias = np.delete(pts_ias, indices_remove, axis=0)

            # Keep the surviving IAS points.
            all_pts = np.vstack((all_pts, pts_ias))

            # Optionally include IAS points of unselected basins that border this one.
            if include_other_surfaces:
                for i in range(len(self.maximas)):
                    if i != i_basin and i in self.indices_maxima and i not in atom_indices:
                        # IAS points of basin `i` whose recorded neighbour is `i_basin`.
                        indices = np.where(np.abs(i_basin - np.array(self.basins_ias[i])) < 1e-10)[0]
                        if len(indices) != 0:
                            ias_indices = np.array(self.ias[i])[indices]
                            sph_pts = self.generate_angular_pts_of_basin(i)
                            new_pts = self.maximas[i] + self.r_func[i][ias_indices, None] * sph_pts[ias_indices]
                            all_pts = np.vstack((all_pts, new_pts))
        return all_pts
+
    def construct_points_between_ias_and_oas(self, basin_ids, dens_func, grad_func, ss_0, max_ss, tol, iso_err,
                                             method="qtaim", basin_assignment_callable=None):
        r"""
        Construct points between the inner atomic surface and outer atomic surface.

        This is done by constructing a convex hull of the IAS and of the OAS,
        separately. For each IAS boundary point close to the OAS, the two closest
        points on the OAS are found and a triangle is constructed. Seven points
        are generated within this triangle and, for each of the corresponding
        directions, the radius of the basin surface is solved for.

        Parameters
        -----------
        basin_ids : list[int]
            List of integers specifying the index of the basins/maximas for which
            refinement is to be performed.
        dens_func : callable(ndarray(N, 3) -> ndarray(N,))
            Electron density function.
        grad_func : callable(ndarray(N, 3) -> ndarray(N, 3))
            Gradient of the electron density (used by the "qtaim" method).
        ss_0, max_ss, tol : float
            Initial step size, maximal step size and tolerance forwarded to the
            IAS intersection solver.
        iso_err : float
            The error in solving for the isosurface points on the outer-atomic surface.
        method : str
            Either "qtaim" or "hirshfeld". For Hirshfeld-type methods,
            provide `basin_assignment_callable`.
        basin_assignment_callable : callable, optional
            Assigns points to basins; required when `method` is "hirshfeld".

        Returns
        -------
        list[ndarray(K_i * 7, 3)]
            Per requested basin, Cartesian coordinates of the refined points on the
            sphere centered at its maxima, where :math:`K_i` is the number of
            selected IAS boundary points of that basin.

        """
        if method == "hirshfeld" and basin_assignment_callable is None:
            raise ValueError(f"Basin assignment callable should not be None.")
        ias_parameters = []  # parameters needed for solving the IAS radii
        all_angular_pts = []
        indices_for_each_basin = [0]
        for i_basin in basin_ids:
            maxima = self.maximas[i_basin]
            ias = self.ias[i_basin]
            oas = self.oas[i_basin]

            # NOTE(review): this returns from the WHOLE call as soon as one basin
            # has <= 3 OAS points, discarding earlier basins' work and changing
            # the return type — confirm whether `continue` was intended.
            if len(oas) <= 3:
                return np.empty((0, 3), dtype=float)
            r_func_max = self.r_func[i_basin]
            angular_pts = self.generate_angular_pts_of_basin(i_basin)
            # Take a convex hull of both IAS and OAS separately.
            ias_pts = maxima + r_func_max[ias, None] * angular_pts[ias, :]
            oas_pts = maxima + r_func_max[oas, None] * angular_pts[oas, :]
            ias_hull = ConvexHull(ias_pts)
            oas_hull = ConvexHull(oas_pts)
            ias_bnd = ias_hull.points[ias_hull.vertices]
            oas_bnd = oas_hull.points[oas_hull.vertices]

            # Distance from every IAS boundary point to every OAS boundary point.
            dist_mat = cdist(ias_bnd, oas_bnd)
            i_smallest_dist = np.argmin(dist_mat, axis=1)
            smallest_dist = dist_mat[np.arange(len(ias_bnd)), i_smallest_dist]

            # For each selected point on the IAS take the closest two points on the OAS.
            new_ang_pts = np.zeros((0, 3), dtype=np.float64)  # usually 7 points per ias boundary are added.
            # Assume the distances follow a log-normal distribution and keep only the
            # IAS boundary points unusually close to the OAS (low quantile).
            extreme_ends = np.exp(np.mean(np.log(smallest_dist)) - 1.2 * np.std(np.log(smallest_dist)))
            indices = np.where(smallest_dist < extreme_ends)[0]
            i_angular = 0
            for i_ias in indices:
                pt_ias = ias_bnd[i_ias, :]
                # Get the two closest points on OAS to this IAS pt.
                two_indices = dist_mat[i_ias].argsort()[:2]
                pt1, pt2 = oas_bnd[two_indices[0]], oas_bnd[two_indices[1]]

                # Take the center and the midpoint of each edge of the triangle (pt_ias, pt1, pt2).
                midpoint = (pt1 + pt2 + pt_ias) / 3.0
                line_pt1 = (pt1 + pt_ias) / 2.0
                line_pt2 = (pt2 + pt_ias) / 2.0
                line_pt3 = (pt1 + pt2) / 2.0

                # The triangle with its center splits into three polygons; take the center of each.
                poly_pt1 = (midpoint + line_pt1 + line_pt2 + pt_ias) / 4.0
                poly_pt2 = (midpoint + line_pt1 + line_pt3 + pt1) / 4.0
                poly_pt3 = (midpoint + line_pt2 + line_pt3 + pt2) / 4.0

                new_pts = np.array([midpoint, line_pt1, line_pt2, line_pt3, poly_pt1, poly_pt2, poly_pt3])
                # Solve for the angular directions of these 7 points via
                # r = m + t * direction, where m is the maxima and |direction| = 1.
                new_pts = new_pts - maxima
                # Delete points that coincide with the maxima.
                direction = np.delete(new_pts, np.all(np.abs(new_pts) < 1e-10, axis=1), axis=0)
                radiuses = np.linalg.norm(direction, axis=1)
                direction = direction / radiuses[:, None]
                # Delete duplicate directions. (NOTE: rebinds `indices`; the outer
                # loop is unaffected because its iterator was already captured.)
                direction, indices = np.unique(direction, axis=0, return_index=True)
                radiuses = radiuses[indices]
                new_ang_pts = np.vstack((new_ang_pts, direction))

                # Add the solver parameters for each new direction.
                # NOTE(review): iterates len(new_pts) == 7 times, but `radiuses`
                # may be shorter after the de-duplication above — potential
                # IndexError/miscount; confirm len(direction) was intended.
                for i in range(len(new_pts)):
                    # Construct lower and upper bound centered on the current radius.
                    l_bnd = radiuses[i] - 0.1
                    u_bnd = radiuses[i] + 0.1
                    ss = 0.05
                    ias_parameters.append(
                        [i_basin, i_angular, l_bnd, u_bnd, ss, -1, i_angular]
                    )
                    i_angular += 1

            all_angular_pts.append(new_ang_pts)
            # NOTE(review): summing the whole offset list (rather than taking its
            # last entry) plus the extra append below look inconsistent for more
            # than two basins; `indices_for_each_basin` is unused afterwards.
            indices_for_each_basin.append(sum(indices_for_each_basin) + len(new_ang_pts))
        indices_for_each_basin.append(len(all_angular_pts))
        ias_parameters = np.array(ias_parameters)

        print("Solve for the new refinement")
        # Solve for the IAS radii along the new directions.

        angular_pts = [[0.0, 0.0, 0.0]] * len(self.maximas)
        ias_lengths = [1] * len(self.maximas)
        for i, i_basin in enumerate(basin_ids):
            angular_pts[i_basin] = all_angular_pts[i]
            ias_lengths[i_basin] = len(all_angular_pts[i])

        if method == "qtaim":
            r_func_new, _ = solve_intersection_of_ias_point(
                self.maximas, ias_parameters, angular_pts, dens_func, grad_func, self.beta_spheres,
                bnd_err=1e-5, ias_lengths=ias_lengths, ss_0=ss_0, max_ss=max_ss, tol=tol,
            )
        elif method == "hirshfeld":
            r_func_new, _ = solve_intersection_of_ias_point_hirshfeld(
                self.maximas, ias_parameters, angular_pts, basin_assignment_callable, bnd_err=1e-5, ias_lengths=ias_lengths
            )
        else:
            raise ValueError(f"Method {method} not recognized.")

        # For each basin, make sure no refined point has density below the isosurface value.
        all_pts = []
        for i, i_basin in enumerate(basin_ids):
            new_pts = self.maximas[i_basin] + r_func_new[i_basin][:, None] * all_angular_pts[i]
            print(f"Number of new points to add: {len(new_pts)}")

            # Points below the isosurface value are projected back onto the isosurface.
            dens_vals = dens_func(new_pts)
            indices = np.where(dens_vals < self.iso_val)[0]
            if len(indices) != 0:
                # Construct a bounded interval to solve for the isosurface radius
                # (presumably updates r_func_new[i_basin] in-place — the return
                # value is unused; verify against the solver's contract).
                solve_for_oas_points(
                    np.array([self.maximas[i_basin]]), [0], [indices], [all_angular_pts[i]],
                    dens_func, self.iso_val, iso_err, [r_func_new[i_basin]]
                )
                new_pts[indices] = self.maximas[i_basin] + r_func_new[i_basin][indices, None] * all_angular_pts[i][indices, :]

            all_pts.append(new_pts)
        return all_pts
+
    def plot_basins(self, basins, include_other_surfaces=False, annotate_maximas=True):
        """Scatter-plot IAS (black) and OAS (red) points of the given basins in 3D.

        Parameters
        ----------
        basins : list[int]
            Indices of the basins to plot.
        include_other_surfaces : bool
            Forwarded to `get_ias_pts_of_basin`.
        annotate_maximas : bool
            If True, label each maxima (green) with its index.
        """
        fig = plt.figure()
        ax = plt.axes(projection='3d')
        p = self.maximas
        ax.scatter(p[:, 0], p[:, 1], p[:, 2], color="g", s=60, label="Maximas")
        for i_basin in basins:
            p = self.get_ias_pts_of_basin(i_basin, include_other_surfaces=include_other_surfaces)
            ax.scatter(p[:, 0], p[:, 1], p[:, 2], color="k")
            p = self.get_oas_pts_of_basin(i_basin)
            ax.scatter(p[:, 0], p[:, 1], p[:, 2], color="r")
        if annotate_maximas:
            for i, x in enumerate(self.maximas):
                ax.text(x[0], x[1], x[2], "%s" % (str(i)), size=12, zorder=1)
        plt.show()
+
    def interpolate_radial_func(self, method="smooth", ias=False, oas=False):
        """Interpolate the radial surface function (stub — not implemented).

        NOTE(review): the final `raise NotImplementedError` executes
        unconditionally (the `if ias:` branch only `pass`es) — confirm the
        intended control flow before implementing.
        """
        # if method not in ["smooth", ]
        if ias and oas:
            raise ValueError(f"Both {ias} and {oas} cannot be true.")
        if ias:
            # TODO: implement IAS-only interpolation.
            pass
        raise NotImplementedError(f"Not implemented yet.")
diff --git a/chemtools/topology/test/test_qtaim.py b/chemtools/topology/test/test_qtaim.py
new file mode 100644
index 00000000..7673fef9
--- /dev/null
+++ b/chemtools/topology/test/test_qtaim.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+# ChemTools is a collection of interpretive chemical tools for
+# analyzing outputs of the quantum chemistry calculations.
+#
+# Copyright (C) 2016-2019 The ChemTools Development Team
+#
+# This file is part of ChemTools.
+#
+# ChemTools is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# ChemTools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see
+#
+# --
+import pytest
+import pathlib
+import numpy as np
+from scipy.integrate import solve_ivp
+
+from chemtools.wrappers import Molecule
+from chemtools.topology.qtaim import qtaim_surface_vectorize
+
+from grid.onedgrid import UniformInteger
+from grid.rtransform import PowerRTransform
+
+
def _run_qtaim_algorithm(fchk, degs, iso_val=1e-10, iso_err=1e-5, bnd_err=1e-5, ss_0=0.01, max_ss=0.1, tol=1e-7):
    """Load `fchk` from the example-data folder and run the vectorized QTAIM surface algorithm.

    Returns the `Molecule` wrapper and the resulting surface object.
    """
    # Strip the trailing "topology/test" (13 chars) from this file's directory,
    # then descend into the package's data/examples folder.
    file_path = pathlib.Path(__file__).parent.resolve().__str__()[:-13]
    file_path += "data/examples/" + fchk

    mol = Molecule.from_file(file_path)
    centers = mol.coordinates
    # Density and gradient callables over arbitrary Cartesian points.
    gaussian_func = lambda pts: mol.compute_density(pts)
    gradient_func = lambda pts: mol.compute_gradient(pts)

    result = qtaim_surface_vectorize(
        degs, centers, gaussian_func, gradient_func,
        iso_val=iso_val, bnd_err=bnd_err, iso_err=iso_err, optimize_centers=True,
        ss_0=ss_0, max_ss=max_ss, tol=tol
    )
    return mol, result
+
+
@pytest.mark.parametrize(
    "fchk, degs",
    [
        ("h2o.fchk", [30, 10, 10]),
        ("nh3.fchk", [30, 10, 10, 10]),
        ("ch4.fchk", [25, 15, 15, 15, 15])
    ]
)
def test_atomic_density_sum_to_numb_electrons(fchk, degs):
    r"""Integrating the density over all basins should recover the total electron count."""
    mol, qtaim = _run_qtaim_algorithm(fchk, degs)

    numb = 350
    oned = UniformInteger(numb)
    density_integral = 0.0
    for i in range(len(mol.coordinates)):
        # Radial cutoff: past the farthest surface point (IAS or OAS) plus a buffer.
        b = max(np.max(qtaim.r_func[i][qtaim.ias[i]]), np.max(qtaim.r_func[i][qtaim.oas[i]])) + 1.0
        rgrid = PowerRTransform(1e-10, b).transform_1d_grid(oned)
        atomgrid_basin_0 = qtaim.get_atom_grid_over_basin(i, rgrid)

        dens = mol.compute_density(atomgrid_basin_0.points)
        print("Density Integral ", atomgrid_basin_0.integrate(dens))
        density_integral += atomgrid_basin_0.integrate(dens)

    print("Total Density Integral ", density_integral)
    # Neutral-molecule data: the electron count equals the sum of atomic numbers.
    assert np.abs(density_integral - np.sum(mol.numbers)) < 1e-2
+
+
@pytest.mark.parametrize(
    "fchk, degs",
    [
        ("h2o.fchk", [30, 10, 10]),
        ("nh3.fchk", [30, 10, 10, 10]),
        ("ch4.fchk", [25, 15, 15, 15, 15])
    ]
)
def test_laplacian_is_small(fchk, degs):
    r"""The integral of the Laplacian over each QTAIM basin should be close to zero."""
    mol, qtaim = _run_qtaim_algorithm(fchk, degs)

    numb = 500
    oned = UniformInteger(numb)
    for i in range(len(mol.coordinates)):
        # Radial cutoff: past the farthest surface point (IAS or OAS) plus a buffer.
        b = max(np.max(qtaim.r_func[i][qtaim.ias[i]]), np.max(qtaim.r_func[i][qtaim.oas[i]])) + 1.0
        rgrid = PowerRTransform(1e-10, b).transform_1d_grid(oned)
        atomgrid_basin_0 = qtaim.get_atom_grid_over_basin(i, rgrid)
        # NOTE(review): the 0.25 factor suggests `compute_laplacian` returns four
        # times the Laplacian in this convention — confirm against the wrapper.
        laplacian = 0.25 * mol.compute_laplacian(atomgrid_basin_0.points)
        integral = atomgrid_basin_0.integrate(laplacian)

        print("Laplacian Integral ", integral)
        assert np.abs(integral) < 1e-3, "Laplacian Integral should be close to zero."
+
+
@pytest.mark.parametrize(
    "fchk, degs, iso_val, iso_err",
    [
        ("h2o.fchk", [25, 15, 15], 0.001, 1e-5),
        ("h2o.fchk", [10, 25, 20], 1e-10, 1e-6),
        ("h2o.fchk", [50, 10, 15], 1e-12, 1e-7),
        ("h2o.fchk", [25, 15, 15], 0.01, 1e-7),
        ("nh3.fchk", [25, 15, 15, 15], 0.001, 1e-6),
        ("ch4.fchk", [25, 15, 15, 15, 15], 1e-10, 1e-5)
    ]
)
def test_oas_isosurface_value(fchk, degs, iso_val, iso_err):
    r"""Test the isosurface value of the OAS points is correct."""
    mol, qtaim = _run_qtaim_algorithm(fchk, degs, iso_val, iso_err)
    for i in range(len(mol.coordinates)):
        # Density at every OAS point should match the isosurface value within iso_err.
        oas_pts = qtaim.get_oas_pts_of_basin(i)
        density = mol.compute_density(oas_pts)
        print(np.abs(density - iso_val))
        assert np.all(np.abs(density - iso_val) < iso_err)

        # IAS points are interior boundaries: their density should be away from the
        # isosurface value (an isolated atom, e.g. Kr, would have no IAS points).
        ias_pts = qtaim.get_ias_pts_of_basin(i)
        if len(ias_pts) != 0:
            density = mol.compute_density(ias_pts)
            assert np.all(np.abs(density - iso_val) > iso_err)
+
+
@pytest.mark.parametrize(
    "fchk, degs, bnd_err",
    [
        ("h2o.fchk", [15, 8, 8], 1e-5),
        ("h2o.fchk", [15, 8, 8], 1e-4),
        ("h2o.fchk", [15, 8, 8], 1e-3),
        ("nh3.fchk", [15, 8, 8, 8], 1e-5),
        ("ch4.fchk", [15, 8, 8, 8, 8], 1e-5)
    ]
)
def test_ias_basin_values(fchk, degs, bnd_err):
    r"""Test that each IAS point's neighbouring-basin label is correctly assigned."""
    mol, qtaim = _run_qtaim_algorithm(fchk, degs, bnd_err=bnd_err, ss_0=0.01, max_ss=0.1, tol=1e-10)

    def norm_grad_func(x):
        # Normalized density gradient: the steepest-ascent direction field.
        grad = mol.compute_gradient(x)
        return grad / np.linalg.norm(grad, axis=1)[:, None]

    coords = qtaim.maximas
    print("Coordinates ", coords)
    for i in range(0, len(coords)):
        ias_indices = qtaim.ias[i]
        print("atom i ", i)
        print(ias_indices)
        # Unit directions of this basin's IAS points.
        ias_ang = qtaim.generate_angular_pts_of_basin(i)[ias_indices, :]

        # None of the neighbour-basin labels should be the current maxima itself.
        basin_vals_ias = np.array(qtaim.basins_ias[i])
        assert np.all(basin_vals_ias != i)

        for j in range(len(ias_ang)):
            basin_pt = basin_vals_ias[j]
            ias_pt = coords[i] + ias_ang[j] * qtaim.r_func[i][ias_indices[j]]
            # Slightly beyond the surface: should converge to the neighbouring basin.
            ias_pt_basin = coords[i] + ias_ang[j] * (qtaim.r_func[i][ias_indices[j]] + bnd_err * 10)
            # Slightly inside the surface: should converge to the current maxima.
            ias_pt_inner = coords[i] + ias_ang[j] * (qtaim.r_func[i][ias_indices[j]] - bnd_err * 10)
            print(ias_pt, basin_pt)

            # Steepest-ascent ODE from the outer point must end at the neighbour's maxima.
            sol = solve_ivp(
                lambda t, x: norm_grad_func(np.array([x]))[0].T,
                y0=ias_pt_basin,
                t_span=(0, 8),
                method="RK45",
                first_step=bnd_err,
                max_step=0.23,
                atol=1e-7,
                rtol=1e-4,
            )["y"][:, -1]
            print(sol)
            assert np.all(np.abs(sol - coords[basin_pt]) < 1e-1)

            # Steepest-ascent ODE from the inner point must end at this maxima.
            sol = solve_ivp(
                lambda t, x: norm_grad_func(np.array([x]))[0].T,
                y0=ias_pt_inner,
                t_span=(0, 8),
                method="RK45",
                first_step=bnd_err,
                max_step=0.23,
                atol=1e-7,
                rtol=1e-4,
            )["y"][:, -1]
            print(sol)
            assert np.all(np.abs(sol - coords[i]) < 1e-1)

        print("")
diff --git a/chemtools/topology/test_qtaim.py b/chemtools/topology/test_qtaim.py
new file mode 100644
index 00000000..784b3983
--- /dev/null
+++ b/chemtools/topology/test_qtaim.py
@@ -0,0 +1,501 @@
+# -*- coding: utf-8 -*-
+# ChemTools is a collection of interpretive chemical tools for
+# analyzing outputs of the quantum chemistry calculations.
+#
+# Copyright (C) 2016-2019 The ChemTools Development Team
+#
+# This file is part of ChemTools.
+#
+# ChemTools is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# ChemTools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see
+#
+# --
+from chemtools.topology.qtaim_depreciated import qtaim_surface
+# from chemtools.topology.qtaim import qtaim_surface_vectorize
+from chemtools.topology.yu_trinkle import qtaim, _get_area_of_coplanar_polygon
+
+import numpy as np
+import pathlib
+
+from scipy.integrate import solve_ivp
+from scipy.spatial import ConvexHull
+from scipy.stats import special_ortho_group
+from scipy.spatial.transform.rotation import Rotation
+from grid.cubic import Tensor1DGrids, UniformGrid
+from grid.onedgrid import OneDGrid, GaussChebyshev, GaussLaguerre, Trapezoidal
+from grid.rtransform import LinearFiniteRTransform, PowerRTransform, BeckeRTransform
+from grid.becke import BeckeWeights
+from grid.molgrid import MolGrid
+
+import pytest
+
+
+def _get_cubic_grid(l_bnd, u_bnd, ss):
+ oned = np.arange(l_bnd, u_bnd, ss)
+ oned_grid = OneDGrid(oned, oned)
+ return Tensor1DGrids(oned_grid, oned_grid, oned_grid)
+
+
+def _get_molecular_grid(centers):
+ # Construct two atomic grid whose Lebedev degrees increase.
+ oned = np.arange(0.001, 2.0, 0.25)
+ rgrid = OneDGrid(oned, oned)
+ numbs = np.array([1] * centers.shape[0])
+ return MolGrid.from_preset(atnums=numbs, atcoords=centers, rgrid=rgrid,
+ preset="coarse", aim_weights=BeckeWeights())
+
+
+@pytest.mark.parametrize("shape", np.random.randint(5, 30, size=(10, 3)))
+def test_with_simple_zero_flux_surface_of_two_exponentials_on_cubic_grid(shape):
+ r"""Test two exponentials symmetrically spaced apart on a cubic grid.
+
+ One is multipled by 0.98 to remove symmetry. Zero-flux surface occurs at (0, y, z).
+ """
+ centers = np.array([[-1, 0, 0], [1, 0, 0]])
+ gaussian_func = lambda pts: np.exp(-np.linalg.norm(pts - centers[0], axis=1)) + \
+ 0.98 * np.exp(-np.linalg.norm(pts - centers[1], axis=1))
+
+ # Define Grid and evaluate the density on the grid
+ origin = np.array([-1.5, -1.5, -1.5])
+ shape = np.array(shape)
+ axes = np.eye(3) * (1.5 + 1.5) / (shape - 1).T
+ grid = UniformGrid(origin, axes, shape=shape)
+ gaussians = gaussian_func(grid.points)
+ result = qtaim(grid, gaussians, bounding_box=False)
+
+ assert result["basin_cont"].shape[1] == 2
+ for i, pt in enumerate(grid.points):
+ basin_assigned = result["basin_cont"][i].argmax() + 1
+ if basin_assigned == 1:
+ # Assert it is part of basin one.
+ assert pt[0] <= 0.0
+ elif basin_assigned == 2:
+ # assert it is part of basin two.
+ assert pt[0] >= 0.0
+
+
+@pytest.mark.parametrize("shape", np.random.randint(5, 30, size=(3, 3)))
+def test_basin_are_correctly_assigned_against_accurate_scipy_ode_solver(shape):
+    r"""Check qtaim basin labels against steepest-ascent paths from an accurate ODE solver."""
+    centers = np.array([[-1.5, 0, 0], [1.5, 0, 0]])
+    gaussian_func = lambda pts: np.exp(-np.linalg.norm(pts - centers[0], axis=1)**2.0) + \
+        np.exp(-np.linalg.norm(pts - centers[1], axis=1)**2.0)
+
+    # NOTE(review): `(pts - c) * np.exp(...).T` multiplies an (N, 3) array by an (N,)
+    # vector, which only broadcasts for N == 1 (or N == 3); confirm `qtaim` evaluates
+    # the gradient pointwise — otherwise the exponent factor needs `[:, None]`.
+    gradient_func = lambda pts: (
+        -2.0 * ((pts - centers[0]) * np.exp(-np.linalg.norm(pts - centers[0], axis=1) ** 2.0).T
+            + (pts - centers[1]) * np.exp(-np.linalg.norm(pts - centers[1], axis=1)**2.0).T)
+    )
+
+    # Define Grid and evaluate the density on the grid
+    origin = np.array([-1.5, -1.5, -1.5])
+    shape = np.array(shape)
+    axes = np.eye(3) * (1.5 + 1.5) / (shape - 1).T
+    grid = UniformGrid(origin, axes, shape=shape)
+    gaussians = gaussian_func(grid.points)
+
+    # Do qtaim on the grid.
+    result = qtaim(grid, gaussians, bounding_box=False, grad_func=gradient_func)
+    maximas = grid.points[result["maxima_indices"]]
+
+    # take random sample of points
+    numb_samples = 1000
+    sample_indices = np.random.randint(0, grid.points.shape[0], size=numb_samples)
+    for i_samp in sample_indices:
+        pt_samp = grid.points[i_samp]
+
+        basin_weights = result["basin_cont"][i_samp].toarray()
+
+        # Only check with points that are not on the zero-flux surface.
+        if np.all(np.abs(basin_weights - 0.5) > 0.01) and np.abs(pt_samp[0]) > axes[0, 0]:
+            # Reference steepest-ascent path from a high-order adaptive solver.
+            sol = solve_ivp(
+                lambda t, x: gradient_func(np.array([x]))[0].T,
+                y0=pt_samp,
+                t_span=(0, 1000),
+                method="DOP853",
+                max_step=50
+            )
+            # print("solution ", sol, " maximas ", )
+            print("Pt Sample", pt_samp, "Basin of it ", result["basin_cont"][i_samp], basin_weights)
+
+            # basin assigned by the algorithm
+            basin_assigned = result["basin_cont"][i_samp].toarray().argmax() + 1
+            # basin assigned by the ode (closest maxima to the path endpoint)
+            basin_assigned_ode = np.linalg.norm(sol["y"][:, -1] - maximas, axis=1).argmin() + 1
+            print(basin_assigned, basin_assigned_ode)
+            assert basin_assigned == basin_assigned_ode
+
+
+@pytest.mark.parametrize("num_pts", np.random.randint(4, 8, size=(200,)))
+def test_get_area_of_coplanar_points_against_scipy_convexhull(num_pts):
+    r"""Test finding the area of coplanar points against SciPy convex hull algorithm."""
+    # seems that this is only accurate up to seven points, couldn't get it working past 7
+    # unless the convex, coplanar polygon was a "nice" polygon.
+    origin, pt = np.random.random((2,3))
+    vertices = np.zeros((num_pts, 3))
+    vertices[0] = pt
+
+    # Rotate the points from finding a rotation matrix that rotates based on total_deg
+    total_deg = 360 / (num_pts + np.random.randint(2, 9))
+    # rotate x,y,z by total_deg
+    rot_mat = Rotation.from_euler('xyz', [total_deg, total_deg, total_deg], degrees=True)
+    rot_mat = rot_mat.as_matrix()
+    # Each vertex is the previous one rotated about `origin`, so all vertices are
+    # coplanar and form a convex polygon.
+    for i in range(1, num_pts):
+        vertices[i] = origin + rot_mat.dot(vertices[i - 1] - origin)
+    desired = _get_area_of_coplanar_polygon(vertices)
+    # "QJ" joggles the input so qhull accepts the degenerate (flat) point set;
+    # the hull then has two faces (top/bottom), hence area / 2.
+    convex = ConvexHull(vertices, qhull_options="QJ")
+    print(desired, convex.area, convex.area / 2.0)
+    assert np.abs(desired - convex.area / 2.0) < 1e-8
+
+
+def test_get_area_of_coplanar_points_against_perfect_shapes():
+    r"""Test get area of coplanar against squares and rectangles."""
+    # 2x2 square in the z = 0 plane; area is 4.
+    square = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]]) * 2
+    desired = _get_area_of_coplanar_polygon(square)
+    print(desired, ConvexHull(square, qhull_options="QJ").area / 2.0)
+    assert np.abs(desired - ConvexHull(square, qhull_options="QJ").area / 2.0) < 1e-8
+    assert np.abs(desired - 2 * 2) < 1e-8
+
+    # Rotate square (rotation preserves area)
+    rot_matrix = special_ortho_group.rvs(3)
+    square = square.dot(rot_matrix)
+    desired = _get_area_of_coplanar_polygon(square)
+    print(desired, ConvexHull(square, qhull_options="QJ").area / 2.0)
+    assert np.abs(desired - ConvexHull(square, qhull_options="QJ").area / 2.0) < 1e-8
+    assert np.abs(desired - 2 * 2) < 1e-8
+
+    # Test on rectangle (1 x 5; area is 5)
+    rectangle = np.array([[0, 0, 0], [1, 0, 0], [1, 5, 0], [0, 5, 0]])
+    desired = _get_area_of_coplanar_polygon(rectangle)
+    print(desired, ConvexHull(rectangle, qhull_options="QJ").area / 2.0)
+    assert np.abs(desired - ConvexHull(rectangle, qhull_options="QJ").area / 2.0) < 1e-8
+    assert np.abs(desired - 5) < 1e-8
+
+    # Rotate rectangle with the same random rotation
+    rectangle = rectangle.dot(rot_matrix)
+    desired = _get_area_of_coplanar_polygon(rectangle)
+    print(desired, ConvexHull(rectangle, qhull_options="QJ").area / 2.0)
+    assert np.abs(desired - ConvexHull(rectangle, qhull_options="QJ").area / 2.0) < 1e-8
+    assert np.abs(desired - 5) < 1e-8
+
+
+@pytest.mark.parametrize("shape", np.random.randint(12, 30, size=(10, 3)))
+def test_qtaim_cubic_vs_qtaim_voronoi_algorithms(shape):
+    r"""Test QTAIM algorithm using a cubic grid and voronoi style."""
+    centers = np.array([[-1, 0, 0], [1, 0, 0]])
+    # multiply by 0.99 to order the points uniquely when you sort in the qtaim algorithm.
+    gaussian_func = lambda pts: np.exp(-np.linalg.norm(pts - centers[0], axis=1)) + \
+        0.99 * np.exp(-np.linalg.norm(pts - centers[1], axis=1))
+
+    # Define Grid and evaluate the density on the grid
+    origin = np.array([-1.5, -1.5, -1.5])
+    shape = np.array(shape)
+    axes = np.eye(3) * (1.5 + 1.5) / (shape - 1).T
+    grid = UniformGrid(origin, axes, shape=shape)
+    gaussians = gaussian_func(grid.points)
+
+    # Run the voronoi-style variant (raw points) and the cubic-grid variant.
+    result_voronoi = qtaim(grid.points, gaussians, num_centers=2)
+    result_cubic = qtaim(grid, gaussians, num_centers=2)
+
+    assert result_voronoi["basin_cont"].shape[1] == 2
+    assert result_cubic["basin_cont"].shape[1] == 2
+
+    # Visit points in order of decreasing density, as the algorithm does.
+    indices = np.argsort(gaussians)[::-1]
+    gaussians = gaussians[indices]
+    for i, pt in enumerate(grid.points[indices, :]):
+        # Points on the boundary of the cube may have different areas
+        if np.all(np.abs(np.abs(pt) - 1.5) > 1e-4):
+            basin_weights_cubic = result_cubic["basin_cont"][indices[i]].toarray()
+            basin_weights_voronoi = result_voronoi["basin_cont"][indices[i]].toarray()
+            print(i, indices[i], pt, basin_weights_cubic, basin_weights_voronoi, gaussians[i])
+            assert np.all(np.abs(basin_weights_cubic - basin_weights_voronoi) < 1e-8)
+        else:
+            # At least check if their assigned basins are the same.
+            basin_weights_cubic = result_cubic["basin_cont"][indices[i]].toarray().argmax()
+            basin_weights_voronoi = result_voronoi["basin_cont"][indices[i]].toarray().argmax()
+            assert basin_weights_cubic == basin_weights_voronoi
+
+
+@pytest.mark.parametrize("use_gradient", [True, False])
+def test_integral_of_gaussians_using_cubic_grid_up_to_three_decimal(use_gradient):
+ r"""Test the integral of the basins of two Gaussians that are far apart."""
+ centers = np.array([[-1, 0, 0], [1, 0, 0]])
+ # multiply by 0.98 to order the points uniquely when you sort in the qtaim algorithm.
+ alpha = 30
+ gaussian_func = lambda pts: np.exp(-alpha * np.linalg.norm(pts - centers[0], axis=1)**2.0) + \
+ 0.99 * np.exp(-alpha * np.linalg.norm(pts - centers[1], axis=1)**2.0)
+
+
+ gradient_func = lambda pts: (
+ -2.0 * ((pts - centers[0]) * np.exp(-np.linalg.norm(pts - centers[0], axis=1) ** 2.0).T
+ + (pts - centers[1]) * np.exp(-np.linalg.norm(pts - centers[1], axis=1)**2.0).T)
+ )
+
+ # Define Grid and evaluate the density on the grid
+ origin = np.array([-1.5, -1.5, -1.5])
+ shape = np.array([50, 45, 40])
+ axes = np.eye(3) * (1.5 + 1.5) / (shape - 1).T
+ print(axes)
+ grid = UniformGrid(origin, axes, shape=shape, weight="Rectangle")
+ gaussians = gaussian_func(grid.points)
+
+ if use_gradient:
+ result_voronoi = qtaim(grid, gaussians, num_centers=2, grad_func=gradient_func)
+ else:
+ result_voronoi = qtaim(grid, gaussians, num_centers=2, grad_func=None)
+
+ for i in range(2):
+ integral = grid.integrate(result_voronoi["basin_cont"][:, i].toarray().ravel() * gaussians)
+ print(integral)
+ factor = 1.0 if i == 0 else 0.99
+ print(np.sqrt(np.pi / alpha)**3.0 * factor)
+ assert np.abs(integral - factor * np.sqrt(np.pi / alpha)**3.0) < 1e-6
+
+
+def test_qtaim_line_search():
+    r"""Smoke test that `qtaim_surface` runs with beta-spheres on a symmetric two-Gaussian system."""
+    centers = np.array([[-1, 0, 0], [1, 0, 0]])
+    # Symmetric pair of Gaussians (no asymmetry factor): the zero-flux surface is the y-z plane.
+    alpha = 3
+    gaussian_func = lambda pts: np.exp(-alpha * np.linalg.norm(pts - centers[0], axis=1)**2.0) + \
+        np.exp(-alpha * np.linalg.norm(pts - centers[1], axis=1)**2.0)
+
+
+    # Analytic gradient of the density above.
+    gradient_func = lambda pts: (
+        -2.0 * alpha * (
+            (pts - centers[0]) * np.exp(-alpha * np.linalg.norm(pts - centers[0], axis=1) ** 2.0).T
+            + (pts - centers[1]) * np.exp(-alpha * np.linalg.norm(pts - centers[1], axis=1)**2.0).T
+        )
+    )
+
+
+    oned = np.arange(1e-4, 2, 0.1)
+    rgrid = OneDGrid(oned, np.ones(len(oned)) * 0.5)
+    iso_val = 1e-5
+    # NOTE(review): `result` is only smoke-checked (no assertions); the plotting code
+    # below is retained for manual debugging of the surface.
+    result = qtaim_surface(rgrid, 10, centers, gaussian_func, gradient_func,
+                           iso_val=iso_val,
+                           bnd_err=1e-5,
+                           iso_err=1e-6, dens_cutoff=1e-9, beta_spheres=[0.8, 0.8])
+    # import matplotlib
+    # import matplotlib.pyplot as plt
+    # from mpl_toolkits import mplot3d
+    # matplotlib.use("Qt5Agg")
+    # fig = plt.figure()
+    # ax = plt.axes(projection='3d')
+    # q = result.generate_pts_on_surface(0)
+    # p = result.get_ias_pts_of_basin(0)
+    # ax.scatter(p[:, 0], p[:, 1], p[:, 2], color="k")
+    # p = result.get_oas_pts_of_basin(0)
+    # ax.scatter(p[:, 0], p[:, 1], p[:, 2], color="r")
+    # plt.show()
+
+
+
+class TestQTAIMSurfaceOnTwoBodyGaussian():
+    r"""Tests of `qtaim_surface` on a density built from two Gaussians.
+
+    The model density is :math:`e^{-\alpha \|x - c_1\|^2} + e^{-\alpha \|x - c_2\|^2}`,
+    whose zero-flux surface is the plane bisecting the two centers.
+    """
+
+    def gaussian_func(self, pts, centers, alpha):
+        r"""Sum of two unnormalized Gaussians centered at `centers` with exponent `alpha`."""
+        return np.exp(-alpha * np.linalg.norm(pts - centers[0], axis=1)**2.0) + \
+               np.exp(-alpha * np.linalg.norm(pts - centers[1], axis=1)**2.0)
+
+    def gradient_gaussian(self, pts, centers, alpha):
+        r"""Analytic gradient of `gaussian_func` at each row of `pts`."""
+        fac1 = np.exp(-alpha * np.linalg.norm(pts - centers[0], axis=1)**2.0).T
+        fac2 = np.exp(-alpha * np.linalg.norm(pts - centers[1], axis=1)**2.0).T
+        sol = (
+            -2.0 * alpha *
+            (
+                (pts - centers[0]) * fac1[:, None] + (pts - centers[1]) * fac2[:, None]
+            )
+        )
+        return sol
+
+    @pytest.mark.parametrize(
+        "centers", [
+            np.array([[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]),
+            np.array([[0.0, -0.75, 0.], [0.0, 0.75, 0.]]),
+            np.vstack((np.random.uniform(-1, 0.5, size=(3,)), np.random.uniform(0.5, 1, size=(3,))))
+        ],
+    )
+    @pytest.mark.parametrize("iso_val", [1e-5, 1e-4, 1e-2])
+    def test_outer_atomic_surface_has_correct_isosurface_values(self, centers, iso_val):
+        r"""Test outer atomic surface has correct isosurface value."""
+        rgrid = GaussLaguerre(15)
+        alpha = 2
+        gaussian_func = lambda pts: self.gaussian_func(pts, centers, alpha)
+        gradient_func = lambda pts: self.gradient_gaussian(pts, centers, alpha)
+        iso_err = 1e-6
+        result = qtaim_surface(rgrid, 15, centers, gaussian_func, gradient_func,
+                               iso_val=iso_val,
+                               bnd_err=1e-5, iso_err=iso_err, dens_cutoff=1e-9,
+                               optimize_centers=True)
+        # Test that the outer surface gives the correct isosurface value within iso_err.
+        for i in range(0, 2):
+            oas_0 = result.get_oas_pts_of_basin(i)
+            np.set_printoptions(threshold=np.inf)
+            print(gaussian_func(oas_0))
+            assert np.all(np.abs(gaussian_func(oas_0) - iso_val) < iso_err)
+
+    @pytest.mark.parametrize("beta_sphere", [None, [0.8, 0.8]])
+    @pytest.mark.parametrize("bnd_err", [1e-5, 1e-3])
+    @pytest.mark.parametrize("alpha,refine", [[5, True], [1, True], [0.8, False]])
+    def test_inner_atomic_surface_is_correct_on_simple_example(
+        self, beta_sphere, bnd_err, alpha, refine
+    ):
+        r"""Test inner atomic surface lies exactly on x-axis on this example."""
+        # Symmetric centers, so the IAS is the x = 0 plane up to bnd_err.
+        centers = np.array([[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
+        gaussian_func = lambda pts: self.gaussian_func(pts, centers, alpha)
+        gradient_func = lambda pts: self.gradient_gaussian(pts, centers, alpha)
+
+        rgrid = GaussLaguerre(10)
+        result = qtaim_surface(rgrid, 10, centers, gaussian_func, gradient_func,
+                               iso_val=1e-4,
+                               bnd_err=bnd_err, iso_err=1e-6, dens_cutoff=1e-9,
+                               optimize_centers=True, refine=refine)
+        for i in range(0, 2):
+            ias_0 = result.get_ias_pts_of_basin(i)
+            assert np.all(np.abs(ias_0[:, 0]) < bnd_err)
+
+    @pytest.mark.parametrize(
+        "centers, refine", [
+            [np.array([[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]), True],
+            [np.vstack(
+                (np.random.uniform(-1, -0.1, size=(3,)), np.random.uniform(0.1, 1, size=(3,)))
+            ), False],
+            [np.vstack(
+                (np.random.uniform(-1, -0.1, size=(3,)), np.random.uniform(0.1, 1, size=(3,)))
+            ), True],
+            [np.vstack(
+                (np.random.uniform(-1, -0.1, size=(3,)), np.random.uniform(0.1, 1, size=(3,)))
+            ), False]
+        ],
+    )
+    def test_outer_atomic_surface_is_correctly_assigned_to_basin(self, centers, refine):
+        r"""Test every OAS point flows (via steepest-ascent ODE) to its own maxima."""
+        alpha = 0.75
+        gaussian_func = lambda pts: self.gaussian_func(pts, centers, alpha)
+        gradient_func = lambda pts: self.gradient_gaussian(pts, centers, alpha)
+
+        rgrid = OneDGrid(np.arange(0., 5, 0.5), np.arange(0., 5, 0.5))
+        result = qtaim_surface(rgrid, 15, centers, gaussian_func, gradient_func,
+                               iso_val=1e-4,
+                               bnd_err=1e-6, iso_err=1e-6, dens_cutoff=1e-6,
+                               refine=refine, optimize_centers=True)
+
+        # Test that points on oas all converge to the maxima and no other.
+        for i in range(0, 2):
+            oas_0 = result.get_oas_pts_of_basin(i)
+
+            for pt in oas_0:
+                sol = solve_ivp(
+                    lambda t, x: gradient_func(np.array([x]))[0].T,
+                    y0=pt,
+                    t_span=(0, 10000),
+                    method="Radau",  # DOP853
+                    max_step=10,
+                    atol=1e-9,
+                    rtol=1e-5
+                )
+
+                print(pt, sol["y"][:, -1], centers)
+                assert np.all(np.abs(sol["y"][:, -1] - result.maximas[i]) < 1e-2)
+
+    def test_integration_of_basin(self):
+        r"""Test each basin integral matches the analytic Gaussian integral (pi/alpha)^{3/2}."""
+        centers = np.array([[-1, 0, 0], [1, 0, 0]])
+        alpha = 3
+        gaussian_func = lambda pts: self.gaussian_func(pts, centers, alpha)
+        gradient_func = lambda pts: self.gradient_gaussian(pts, centers, alpha)
+
+        oned = np.arange(0.0, 2, 0.1)
+        rgrid = OneDGrid(oned, np.ones(len(oned)) * 0.1)
+        # rgrid = GaussLaguerre(20)
+        result = qtaim_surface(rgrid, 25, centers, gaussian_func, gradient_func,
+                               iso_val=1e-6, bnd_err=1e-5, iso_err=1e-6, dens_cutoff=1e-9,
+                               beta_spheres=[0.8, 0.8])
+
+        # Change integration grid to something that is more accurate.
+        oned = Trapezoidal(100)
+        rgrid = LinearFiniteRTransform(0, 2).transform_1d_grid(oned)
+
+        # Test integration
+        desired = np.sqrt(np.pi / alpha) ** 3.0
+        for i in range(2):
+            atomgrid_basin_0 = result.get_atom_grid_over_basin(i, rgrid)
+            true = atomgrid_basin_0.integrate(gaussian_func(atomgrid_basin_0.points))
+            assert np.abs(true - desired) < 1e-3
+
+
+@pytest.mark.parametrize(
+ "mol_fchk, degs",
+ [
+ # ("h2o.fchk", [70, 30, 30]),
+ ("nh3.fchk", [70, 30, 30, 30]),
+ # ("atom_kr.fchk", [20]),
+ # ("ch4.fchk", [70, 30, 30, 30, 30])
+ ]
+)
+def test_density_and_laplacian(mol_fchk, degs):
+ r"""Test the integration of laplacian is zero over basin and electron density integration."""
+ file_path = pathlib.Path(__file__).parent.resolve().__str__()[:-8]
+ file_path += "data/examples/" + mol_fchk
+
+ # from chemtools.wrappers import Molecule
+ # mol = Molecule.from_file(file_path)
+ # centers = mol.coordinates
+ # gaussian_func = lambda pts: mol.compute_density(pts)
+ # gradient_func = lambda pts: mol.compute_gradient(pts)
+ import gbasis_cuda
+ from iodata import load_one
+ centers = load_one(file_path).atcoords
+ mol = gbasis_cuda.Molecule(file_path)
+ gaussian_func = lambda pts: mol.compute_electron_density(pts)
+ gradient_func = lambda pts: mol.compute_electron_density_gradient(pts)
+
+ # result = qtaim_surface_vectorize(degs, centers, gaussian_func, gradient_func,
+ # iso_val=1e-10, bnd_err=1e-5, iso_err=1e-6, optimize_centers=True)
+ result = qtaim_surface(70, centers, gaussian_func, gradient_func, iso_val=1e-10, bnd_err=1e-5, iso_err=1e-6, optimize_centers=True,
+ dens_cutoff=1e-12)
+ result.save("delete_test.npz")
+ assert 1 == 0
+ # Test Laplacian and density
+ numb = 500
+ for numb in [500, 600, 700, 800]:
+ print("NUMB ", numb)
+ # oned = GaussChebyshev(numb)
+ from grid.onedgrid import GaussLegendre, UniformInteger
+ oned = UniformInteger(numb)
+ print("Number Radial Points ", numb)
+ density_integral = 0.0
+ laplacian_integral = 0.0
+ for i in range(len(centers)):
+ if mol.numbers[i] == 1:
+ a,b = 1e-8, np.max(result.r_func[i][result.oas[i]])
+ elif mol.numbers[i] == 6:
+ a, b = 3.467e-10, 42.44372
+ elif mol.numbers[i] == 7:
+ a, b = 1.2e-10, 38.1743
+ elif mol.numbers[i] == 8:
+ a, b = 1.8e-10, 22.2270
+ b = np.max(result.r_func[i][result.oas[i]])
+
+ rgrid = PowerRTransform(a, b).transform_1d_grid(oned)
+ print(rgrid.points[-10:], )
+ atomgrid_basin_0 = result.get_atom_grid_over_basin(i, rgrid)
+ laplacian = 0.25 * mol.compute_laplacian(atomgrid_basin_0.points)
+ integral = atomgrid_basin_0.integrate(laplacian)
+
+ print("Laplacian Integral ", integral)
+ laplacian_integral += integral
+ assert np.abs(integral) < 1e-3, "Laplacian Integral should be close to zero."
+
+ dens = mol.compute_density(atomgrid_basin_0.points)
+ print("Density Integral ", atomgrid_basin_0.integrate(dens))
+ density_integral += atomgrid_basin_0.integrate(dens)
+ print()
+ print("Total Density Integral ", density_integral)
+ print("Total Laplacian Integral ", laplacian_integral)
+ assert np.abs(density_integral - np.sum(mol.numbers)) < 1e-2
diff --git a/chemtools/topology/utils.py b/chemtools/topology/utils.py
new file mode 100644
index 00000000..4a133068
--- /dev/null
+++ b/chemtools/topology/utils.py
@@ -0,0 +1,627 @@
+# -*- coding: utf-8 -*-
+# ChemTools is a collection of interpretive chemical tools for
+# analyzing outputs of the quantum chemistry calculations.
+#
+# Copyright (C) 2016-2019 The ChemTools Development Team
+#
+# This file is part of ChemTools.
+#
+# ChemTools is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# ChemTools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see
+#
+# --
+from grid.cubic import UniformGrid
+import numpy as np
+from scipy.spatial.distance import cdist
+from scipy.optimize import root_scalar
+
+from chemtools.topology.ode import find_basins_steepest_ascent_rk45, steepest_ascent_rk45, gradient_path
+
+r"""
+Utility functions that is common between the QTAIM algorithms.
+"""
+
+__all__ = [
+ "solve_for_oas_points",
+ "construct_radial_grids",
+ "find_optimize_centers",
+ "determine_beta_spheres_and_nna",
+ "solve_intersection_of_ias_point",
+ "solve_intersection_of_ias_point_hirshfeld",
+]
+
+
+def solve_for_isosurface_pt(
+    l_bnd, u_bnd, maxima, cart_sphere_pt, density_func, iso_val, iso_err
+):
+    r"""
+    Solves for the point on a ray that satisfies the isosurface value equation.
+
+    .. math::
+        f(r) := \rho(\textbf{A} + r \textbf{\theta}) - c,
+
+    where A is the position of the atom, :math:`\theta` is the Cartesian coordinates of the
+    point on the sphere, r is the radius, and c is the isosurface value. The radius
+    is solved using a root-finding algorithm over an interval that contains the isosurface
+    value.
+
+    Parameters
+    ----------
+    l_bnd: float
+        The lower-bound on the radius for the root-solver. Needs to be less than the
+        isosurface value.
+    u_bnd: float
+        The upper-bound on the radius for the root-solver. Needs to be greater than the
+        isosurface value.
+    maxima: ndarray(3,)
+        The maximum of the atom.
+    cart_sphere_pt: ndarray(3,)
+        The Cartesian coordinates of the point on the sphere.
+    density_func: callable(ndarray(M,3), ndarray(M,))
+        The electron density function.
+    iso_val: float
+        The isosurface value.
+    iso_err: float
+        The xtol for the root-solver.
+
+    Returns
+    -------
+    ndarray(3,):
+        The point :math:`\textbf{A} + r \textbf{\theta}` that satisfies the isosurface value.
+
+    """
+    # Expand the bracket [l_bnd, u_bnd] until it encloses the isosurface value.
+    # NOTE(review): this assumes the density decreases monotonically along the ray;
+    # confirm — otherwise the loop may not terminate.
+    bounds_found = False
+    while not bounds_found:
+        dens_l_bnd = density_func(np.array([maxima + l_bnd * cart_sphere_pt]))
+        dens_u_bnd = density_func(np.array([maxima + u_bnd * cart_sphere_pt]))
+        if iso_val < dens_u_bnd or dens_l_bnd < iso_val:
+            if iso_val < dens_u_bnd:
+                # Still above the isosurface at u_bnd: shift the bracket outward.
+                l_bnd = u_bnd
+                u_bnd += 1.5
+            elif dens_l_bnd < iso_val:
+                # Already below the isosurface at l_bnd: shift the bracket inward.
+                u_bnd = l_bnd
+                l_bnd -= 1.5
+            # raise ValueError(f"Radial grid {l_bnd, u_bnd} did not bound {dens_l_bnd, dens_u_bnd} "
+            #                  f"the isosurface value {iso_val}. Use larger radial grid.")
+        else:
+            bounds_found = True
+
+    # Use Root-finding algorithm to find the isosurface point.
+    root_func = lambda t: density_func(np.array([maxima + t * cart_sphere_pt]))[0] - iso_val
+    sol = root_scalar(root_func, method="toms748", bracket=(l_bnd, u_bnd), xtol=iso_err)
+    assert sol.converged, f"Root function did not converge {sol}."
+    bnd_pt = maxima + sol.root * cart_sphere_pt
+    return bnd_pt
+
+
+def _solve_root_newton_raphson(initial_guess, angular_pts, root_and_grad, xtol, maxiter=1000):
+    r"""
+    Vectorized Newton-Raphson over a batch of rays.
+
+    `root_and_grad(t, ang)` must return the residual and its derivative with respect to
+    the radius `t` for the rays in `ang`.  Rays whose update falls below `xtol` are
+    frozen and removed from the working set; returns the converged radii (zero for rays
+    that never converged) and a success flag (False if `maxiter` was reached).
+    """
+    not_converged = np.arange(len(initial_guess))
+    niter = 0  # Number of iterations
+    success = True
+    pts0 = initial_guess.copy()
+    pts_converged = np.zeros(len(initial_guess))
+    while len(not_converged) != 0:
+        if niter == maxiter:
+            success = False
+            break
+        dens_pts, grad_pts = root_and_grad(pts0, angular_pts[not_converged])
+
+        # Newton step: t_{k+1} = t_k - f(t_k) / f'(t_k).
+        pts1 = pts0 - dens_pts / grad_pts
+        indices_converged = np.where(np.abs(pts1 - pts0) <= xtol)[0]
+        if len(indices_converged) != 0:
+            # Record the converged radii at their original positions.
+            pts_converged[not_converged[indices_converged]] = pts0[indices_converged]
+
+        # Shrink the working set; order matters — indices are into the current subset.
+        not_converged = np.delete(not_converged, indices_converged)
+        pts0 = pts1
+        pts0 = np.delete(pts0, indices_converged)
+        niter += 1
+
+    return pts_converged, success
+
+
+def _solve_root_grid_movement(initial_guess, angular_pts, root_func, iso_err, xtol=1e-3, maxiter=10000):
+ not_converged = np.arange(len(initial_guess))
+ niter = 0 # Number of iterations
+ success = True
+ pts0 = initial_guess.copy()
+ root_pts = root_func(pts0, angular_pts[not_converged])
+ pts_converged = np.zeros(len(initial_guess))
+ while len(not_converged) != 0:
+ if niter == maxiter:
+ success = False
+ break
+
+ # Take a step based on the sign of the root equation, and take the step-size to be minimum of xtol
+ # and root_pts, i.e. you want the step-size to be smaller when you're closer to the root.
+ pts1 = pts0 + np.sign(root_pts) * np.minimum(xtol, root_pts * 10)
+ root_pts = root_func(pts1, angular_pts[not_converged])
+
+ indices_converged = np.where(np.abs(root_pts) < iso_err)[0]
+ if len(indices_converged) != 0:
+ pts_converged[not_converged[indices_converged]] = pts1[indices_converged]
+
+ not_converged = np.delete(not_converged, indices_converged)
+ pts0 = pts1
+ pts0 = np.delete(pts0, indices_converged)
+ root_pts = np.delete(root_pts, indices_converged)
+ niter += 1
+
+ return pts_converged, success
+
+
+def solve_for_oas_points(
+ maximas, maximas_to_do, oas, angular_pts, dens_func, iso_val, iso_err, r_func
+):
+ r"""
+ For each index in outer-atomic surface (OAS) solves for the isovalue point along a ray.
+
+ This is stored inside `r_func`. Solves for the point on a ray that satisfies the isosurface value equation.
+
+ .. math::
+ f(r) := \rho(\textbf{A} + r \textbf{\theta}) - c,
+
+ where A is the position of the atom, :math:`\theta` is the Cartesian coordinates of the
+ point on the sphere, r is the radius, and c is the isosurface value. The radius
+ is solved using a root-finding algorithm over an interval that contains the isosurface
+ value.
+
+ Parameters
+ ----------
+ maximas: ndarray(M, 3)
+ All maximas of the density
+ maximas_to_do: list[int]
+ List of indices to solve for the OAS for each maximas.
+ oas: list[list]
+ List of indices that correspond to angular points whose ray intersect the isosurface
+ of the electron density.
+ radial_grid: list[ndarray]
+ List of radial grids (arrays on zero to infinity) correspond to each maxima.
+ angular_pts: list[ndarray]
+ The angular points on the sphere for each maxima.
+ dens_func: callable()
+ The electron density function.
+ iso_val: float
+ The isosurface value that is to be solved
+ iso_err: float
+ The isosurface error
+ r_func: list[ndarray()]
+ This holds the radial coordinate on the ray that intersects the OAS.
+
+ """
+ for i_maxima in maximas_to_do:
+ print("ATOM ", i_maxima)
+ maxima = maximas[i_maxima]
+ ang_pts = angular_pts[i_maxima]
+ initial_guess = np.array([0.2] * len(oas[i_maxima]))
+
+ def root_and_grad(t, angular_pts_max):
+ # Using logarithm increases accuracy and convergence as Newton-Ralphson has quadratic convergence.
+ real_pts = maxima + t[:, None] * angular_pts_max
+ dens_pts = dens_func(real_pts)
+ return dens_pts - iso_val
+
+ sol_x, success = _solve_root_grid_movement(
+ initial_guess, ang_pts[oas[i_maxima]], root_and_grad, xtol=0.01, iso_err=iso_err
+ )
+ radial_results = sol_x
+ sol_fun = dens_func(maxima + sol_x[:, None] * ang_pts[oas[i_maxima]])
+ # The points that weren't successful, try again.
+ if not success:
+ # Get rid of the points that converge, and re-try with the points that didn't.
+ print("Try solving the root equations for OAS again on individual points.")
+ indices = np.where(np.abs(sol_fun) > iso_err)[0]
+
+ for i_oas in indices:
+ oas_pt = solve_for_isosurface_pt(
+ radial_results[i_oas] - 0.1, radial_results[i_oas] + 0.1, maxima, ang_pts[i_oas],
+ dens_func, iso_val, iso_err
+ )
+ radial_results[i_oas] = np.linalg.norm(oas_pt - maxima)
+ r_func[i_maxima][oas[i_maxima]] = radial_results
+
+
+def find_optimize_centers(centers, grad_func):
+ maximas = np.array(
+ [gradient_path(x, grad_func, t_span=(0, 10), method="BDF",
+ first_step=1e-9, max_step=1e-1) for x in centers],
+ dtype=float
+ )
+ print("New maximas: \n ", maximas)
+ # Check for duplicates
+ distance = cdist(maximas, maximas)
+ distance[np.diag_indices(len(maximas))] = 1.0 # Set diagonal elements to one
+ if np.any(distance < 1e-6):
+ raise RuntimeError(f"Optimized maximas contains duplicates: \n {maximas}.")
+ return maximas
+
+
+def construct_radial_grids(pts1, maximas, min_pts=0.2, pad=5.0, ss0=0.23):
+ r"""
+ Construct radial grids around each maxima depending on its neighbors
+
+ Parameters
+ ----------
+ pts: ndarray(M_1, 3)
+ Coordinates of the points to construct radial grids on.
+ maximas: ndarray(M, 3)
+ Coordinates of the maximas.
+ min_pts: Union[float, list]
+ The minimum radial value on [0, \infty) to construct a radial grid. If it is a list,
+ it should be of size `M`.
+ pad: float
+ Extra padding to add to make sure the radial grid covers the intersection
+ with the inter-atomic and outer-atomic surfaces.
+ ss0: float
+ The step-size of the uniform radial grid.
+
+
+ Returns
+ -------
+ list[ndarray]:
+ List of radial grid of length `M_1`. The radial grid are uniform
+ grids that start at `min` and end on the maximum distance to the fifth atom plus
+ an extra padding with stepsize `ss0`.
+
+ """
+ # Construct a radial grid for each atom by taking distance to the closest five atoms.
+ # Added an extra padding in the case of carbon in CH4
+ # TODO: the upper-bound should depend on distance to isosurface value and distance
+ # between atoms
+ if isinstance(min_pts, (float, float)):
+ # If it is a float, convert it to a list.
+ min_pts = [min_pts] * len(maximas)
+ dist_maxs = cdist(pts1, maximas)
+ sorted_dists = np.sort(dist_maxs, axis=1)
+ distance_maximas = sorted_dists[:, min(5, maximas.shape[0] - 1)]
+ distance_minimas = sorted_dists[:, 1] / 4.0
+ radial_grid = [
+ np.arange(min(min_pts[i], distance_minimas[i]), x + pad, ss0) for i, x in enumerate(distance_maximas)
+ ]
+ return radial_grid
+
+
+def determine_beta_spheres_and_nna(
+    beta_spheres, maximas, radial_grids, angular_pts, dens_func, grad_func, hess_func=None
+):
+    r"""
+    Determine the beta-sphere radius of each maxima, discovering non-nuclear attractors.
+
+    For each maxima the radius is grown along its radial grid until a sphere of points
+    at that radius no longer all flow back to the maxima.  When `hess_func` is given,
+    the basin search may also discover new non-nuclear attractors (NNA), which are
+    appended to `maximas` (with fresh radial grids and initial beta-spheres).
+
+    Notes this assumes the initial beta-sphere is 0.01, and so the distance between maximas
+    cannot be smaller than this.
+
+    Returns
+    -------
+    (list, ndarray, list):
+        The beta-spheres, the (possibly extended) maximas, and the radial grids.
+
+    """
+    numb_maximas = len(maximas)
+    initial_beta_sph = 0.01
+    if beta_spheres is None:
+        beta_spheres = [initial_beta_sph] * numb_maximas
+        # TODO: add the sphere angle trick.
+
+    # Determine the beta-spheres.  A while-loop (not a for) because `maximas` may grow
+    # when NNA are found mid-iteration.
+    i_maxima = 0
+    while i_maxima != len(maximas):
+        maxima = maximas[i_maxima]
+        # Only radii still at the initial placeholder value need to be determined.
+        if beta_spheres[i_maxima] == initial_beta_sph:
+            for rad_pt in radial_grids[i_maxima]:
+                if rad_pt > initial_beta_sph:
+                    # Determine the points on the sphere with this radius
+                    pts = maxima + rad_pt * angular_pts
+                    # You want here for the ODE to be accurate in-order to find potential NNA.
+                    if hess_func is None:
+                        basins, _ = find_basins_steepest_ascent_rk45(
+                            pts, dens_func, grad_func, beta_spheres, maximas, ss_0=0.1, max_ss=0.2, tol=1e-9,
+                            hess_func=hess_func
+                        )
+                    else:
+                        # With a Hessian available, also check for new NNA maximas.
+                        basins, maximas = find_basins_steepest_ascent_rk45(
+                            pts, dens_func, grad_func, beta_spheres, maximas, ss_0=0.2, max_ss=0.2, tol=1e-9,
+                            hess_func=hess_func, check_for_nna=True
+                        )
+                    basins = np.array(basins, dtype=int)
+
+                    # Basin labels >= numb_maximas refer to newly found NNA.
+                    which_nna = np.where(basins >= numb_maximas)[0]
+                    if len(which_nna) != 0:
+                        # TODO: Easy way to determin min_pt is to take minimum distance between NNA and other maximas.
+                        # Copy a radial grid from the previous method
+                        radial_grids += \
+                            construct_radial_grids(maximas[numb_maximas:], maximas[:numb_maximas],
+                                                   min_pts=0.01, pad=5.0, ss0=0.01)
+
+                        print(maximas)
+                        print(beta_spheres)
+                        # New NNA start with the placeholder radius and are processed later.
+                        beta_spheres += [initial_beta_sph] * (len(maximas) - numb_maximas)
+                        numb_maximas = len(maximas)
+                        #input("Found NNA")
+
+                    # If all the basins went to same maxima, then update radius
+                    # If the first radial point didn't suffice(e.g. NNA), then set the beta-sphere to something small.
+                    # else then break out of this for loop.
+                    if np.all(basins == i_maxima):
+                        optimal_rad = rad_pt
+                        beta_spheres[i_maxima] = optimal_rad
+                        print(beta_spheres)
+                        print("Optimal radius is ", optimal_rad)
+                    elif np.abs(rad_pt - radial_grids[i_maxima][0]) < 1e-8:
+                        beta_spheres[i_maxima] = min(rad_pt / 4.0, 0.3)
+                        break
+                    else:
+                        break
+        print(f"i Maxima {i_maxima} and Final optimal radius {beta_spheres[i_maxima]}")
+        i_maxima += 1
+        # input("next maxima")
+    return beta_spheres, maximas, radial_grids
+
+
def find_non_nuclear_attractors(maximas, dens_func, grad_func, hess_func):
    r"""
    Finds non-nuclear attractors up to two decimal places.

    A uniform grid is placed over the maximas; points with non-negligible density and
    nearly vanishing gradient are ascended towards their local maximum.  Converged
    points far from every known maxima are refined with a gradient-path ODE and
    accepted as NNAs when the Hessian there is negative definite.

    Parameters
    ----------
    maximas : ndarray(M, 3)
        The known maximas (e.g. the nuclear positions).
    dens_func : callable(ndarray(N, 3) -> ndarray(N,))
        The electron density function.
    grad_func : callable(ndarray(N, 3) -> ndarray(N, 3))
        The gradient of the electron density.
    hess_func : callable
        The Hessian of the electron density, evaluated at candidate attractors.

    Returns
    -------
    ndarray(M+K, 3)
        Returns the original `M` maximas from `maximas` and adds `K` new maximas that are
        non-nuclear attractors.

    """
    # NOTE(review): a charge of 6.0 is used for every center when sizing the grid —
    # presumably an empirical choice; confirm it is adequate for heavier elements.
    grid = UniformGrid.from_molecule(
        [6.0] * len(maximas), maximas, spacing=0.1, rotate=False, extension=2.0,
    )

    # Candidate points: non-negligible density and small gradient components.
    dens_vals = dens_func(grid.points)
    indices2 = np.where(dens_vals > 0.001)[0]
    grads_vals = grad_func(grid.points[indices2])
    print(grads_vals)
    indices = np.where(np.all(np.abs(grads_vals) < 0.01, axis=1))[0]
    print(indices2)
    print(indices)
    if len(indices) != 0:
        np.set_printoptions(threshold=np.inf)
        # Ascend each candidate to its local maximum.
        pts = steepest_ascent_rk45(
            grid.points[indices2][indices], dens_func, grad_func, tol_conv=1e-8
        )
        print(pts)
        # Discard candidates that converged within 0.1 of an already-known maxima.
        dist_maxima = cdist(pts, maximas)
        print(dist_maxima)
        beta_sph = dist_maxima <= [0.1] * len(maximas)
        which_basins = np.where(beta_sph)

        pts = np.delete(pts, which_basins[0], axis=0)
        print("Pts ")
        if len(pts) != 0:
            pts = np.unique(np.round(pts, decimals=1), axis=0)
            # print(pts)
            # Refine each surviving candidate by following the gradient path (stiff BDF solver).
            nna_attractors = np.array(
                [gradient_path(x, grad_func, t_span=(0, 30), method="BDF",
                               first_step=1e-9, max_step=1e-1) for x in pts],
                dtype=float
            )
            # Round to two decimal places mostly due to empirical evidence of convergence of these ODE.
            nna_attractors = np.unique(np.round(nna_attractors, 2), axis=0)
            print(nna_attractors)
            # A true attractor is a local maximum: Hessian negative definite.
            eigs = np.linalg.eigvalsh(hess_func(nna_attractors))
            print("eigs", eigs)
            which_is_nna = np.where(np.all(eigs < -1e-10, axis=1))[0]
            print(which_is_nna)

            maximas = np.vstack((maximas, nna_attractors[which_is_nna]))
            print(maximas)
    return maximas
+
+
def solve_intersection_of_ias_point(
    maximas, ias_indices, angular_pts,
    dens_func, grad_func, beta_spheres, bnd_err, ias_lengths, ss_0, max_ss, tol, hess_func=None
):
    r"""
    Solves the intersection of the ray to the inner-atomic surface.

    A point is associated to each ray based on `ias_indices`. The basin value
    is assigned to each point, and the point is moved along the ray until it keeps switching basins.
    The process is further repeated with a smaller step-size until the distance between two
    points on the ray is less than `bnd_err`.

    Parameters
    ----------
    maximas: ndarray(M, 3)
        Optimized centers of the electron density.
    ias_indices: ndarray(N, 7)
        Rows correspond to each ray that intersects the IAS.  The seven columns are:
        the index of the maxima the ray originates from, the index of the angular
        point/ray, the lower-bound radius, the upper-bound radius, the current
        step-size (interval width), the basin the IAS point switches to (note it
        may not be the true basin value that it switches to), and the index of the
        `IAS` list it points to.
    angular_pts: list[ndarray]
        List of size `M` of the angular points over each maxima.
    dens_func:
        The density of electron density.
    grad_func:
        The gradient of the electron density.
    beta_spheres: ndarray(M,)
        Beta-spheres radius of each atom.
    bnd_err: float
        The error of the intersection of the IAS. When the distance to two consequent points
        on the ray crosses different basins is less than this error, then the midpoint is accepted
        as the final radius value.
    ias_lengths: list[int]
        List of length `M` atoms that contains the number of IAS points
    ss_0: float, optional
        The initial step-size of the ODE (RK45) solver.
    max_ss: float, optional
        Maximum step-size of the ODE (RK45) solver.
    tol: float, optional
        Tolerance for the adaptive step-size.
    hess_func: callable, optional
        The Hessian of the electron density, passed through to the basin solver.

    Returns
    -------
    List[ndarray()], list[list[int]]:
        The first list holds arrays that gives the radius value that intersects the IAS or OAS.
        The second list of size `M`, holds which ias crosses which other basin.

    """
    if not isinstance(ias_indices, np.ndarray):
        raise TypeError(f"The parameters to solve indices should be numpy array instead of {type(ias_indices)}.")
    r_func = [np.zeros((len(angular_pts[i]),), dtype=float) for i in range(len(maximas))]
    basin_ias = [[-1] * ias_lengths[i] for i in range(len(maximas))]

    # Bisection on every ray: shrink [l_bnd, u_bnd] until it is narrower than `bnd_err`.
    while len(ias_indices) != 0:
        # Construct New Points
        points = []
        for (i_maxima, i_ang, l_bnd, u_bnd, _, _, _) in ias_indices:
            # Take the midpoint of interval [l_bnd, u_bnd]
            ray = maximas[int(i_maxima)] + (l_bnd + u_bnd) / 2.0 * angular_pts[int(i_maxima)][int(i_ang), :]
            points.append(ray)
        points = np.vstack(points)

        # Solve for basins
        basins, _ = find_basins_steepest_ascent_rk45(
            points, dens_func, grad_func, beta_spheres, maximas, tol=tol, max_ss=max_ss, ss_0=ss_0, hess_func=hess_func
        )
        # print("Basins", basins)

        # Refine the rays further
        # print("Average step-size", np.mean(ias_indices[:, 4]))
        converge_indices = []
        # Note it always assumes that `l_bnd` goes to `i_maxima` and `u_bnd` goes to `basin_switch`.
        for i, (i_maxima, i_ang, l_bnd, u_bnd, ss, basin_switch, i_ias) in enumerate(ias_indices):
            #print((i_maxima, i_ang, l_bnd, u_bnd, ss, basin_switch))
            if basins[i] == i_maxima:
                # Calculate new step-size between [(l_bnd + u_bnd) / 2, u_bnd]
                new_l_bnd = (l_bnd + u_bnd) / 2
                new_u_bnd = u_bnd
            else:
                # Calculate new stepsize between [l_bnd, (l_bnd + u_bnd) / 2]
                new_l_bnd = l_bnd
                new_u_bnd = (l_bnd + u_bnd) / 2
            # Update the basin that it switches to.
            if basin_switch != basins[i]:
                basin_switch = basins[i]
            new_ss = (new_u_bnd - new_l_bnd)
            # If new stepsize was less than bnd_err, then we converge and should stop.
            if new_ss <= bnd_err:
                r_func[int(i_maxima)][int(i_ang)] = (new_l_bnd + new_u_bnd) / 2
                basin_ias[int(i_maxima)][int(i_ias)] = int(basin_switch)
                converge_indices.append(i)  # Put in list to remove indices.
            else:
                # Update ias_indices for the next iteration for example l_bnd, u_bnd, step-size
                ias_indices[i] = [i_maxima, i_ang, new_l_bnd, new_u_bnd, new_ss, basin_switch, i_ias]

        # Remove converged indices
        # print("Convergence indices", converge_indices)
        ias_indices = np.delete(ias_indices, converge_indices, axis=0)

    # Solve for multiple intersections
    return r_func, basin_ias
+
+
def solve_intersection_of_ias_point_hirshfeld(
    maximas, ias_indices, angular_pts, basin_assignment_callable, bnd_err, ias_lengths,
):
    r"""
    Solves the intersection of the ray to the inner-atomic surface (IAS).

    A point is associated to each ray based on `ias_indices` and assigned a basin via
    `basin_assignment_callable`.  The bracketing interval [l_bnd, u_bnd] on each ray
    is bisected, keeping the originating basin on the lower side, until the interval
    is narrower than `bnd_err`; its midpoint is then accepted as the IAS radius.

    Parameters
    ----------
    maximas: ndarray(M, 3)
        Optimized centers of the electron density.
    ias_indices: ndarray(N, 7)
        Rows correspond to each ray that intersects the IAS.  The seven columns are:
        the index of the maxima the ray originates from, the index of the angular
        point/ray, the lower-bound radius, the upper-bound radius, the current
        step-size (interval width), the basin the IAS point switches to (note it
        may not be the true basin value that it switches to), and the index of the
        `IAS` list it points to.
    angular_pts: list[ndarray]
        List of size `M` of the angular points over each maxima.
    basin_assignment_callable: callable(ndarray(N, 3) -> ndarray(N,))
        Assigns each point to the index of the basin it belongs to (e.g. based on
        Hirshfeld weights).
    bnd_err: float
        The error of the intersection of the IAS.  When the bracketing interval on a
        ray is smaller than this error, the midpoint is accepted as the final radius.
    ias_lengths: list[int]
        List of length `M` atoms that contains the number of IAS points

    Returns
    -------
    List[ndarray()], list[list[int]]:
        The first list holds arrays that gives the radius value that intersects the IAS or OAS.
        The second list of size `M`, holds which ias crosses which other basin.

    """
    if not isinstance(ias_indices, np.ndarray):
        raise TypeError(f"The parameters to solve indices should be numpy array instead of {type(ias_indices)}.")
    r_func = [np.zeros((len(angular_pts[i]),), dtype=np.float64) for i in range(len(maximas))]
    basin_ias = [[-1] * ias_lengths[i] for i in range(len(maximas))]

    while len(ias_indices) != 0:
        # Construct the midpoint of each ray's current bracket [l_bnd, u_bnd].
        points = []
        for (i_maxima, i_ang, l_bnd, u_bnd, _, _, _) in ias_indices:
            ray = maximas[int(i_maxima)] + (l_bnd + u_bnd) / 2.0 * angular_pts[int(i_maxima)][int(i_ang), :]
            points.append(ray)
        points = np.vstack(points)

        # Assign each midpoint to a basin.
        basins = basin_assignment_callable(points)

        converge_indices = []
        # Note it always assumes that `l_bnd` goes to `i_maxima` and `u_bnd` goes to `basin_switch`.
        for i, (i_maxima, i_ang, l_bnd, u_bnd, ss, basin_switch, i_ias) in enumerate(ias_indices):
            if basins[i] == i_maxima:
                # Midpoint is still in the originating basin; bisect towards `u_bnd`.
                new_l_bnd = (l_bnd + u_bnd) / 2
                new_u_bnd = u_bnd
            else:
                # Midpoint switched basins; bisect towards `l_bnd`.
                new_l_bnd = l_bnd
                new_u_bnd = (l_bnd + u_bnd) / 2
            # Update the basin that it switches to.
            if basin_switch != basins[i]:
                basin_switch = basins[i]
            new_ss = (new_u_bnd - new_l_bnd)
            # If the bracket is smaller than bnd_err, the ray has converged.
            if new_ss <= bnd_err:
                r_func[int(i_maxima)][int(i_ang)] = (new_l_bnd + new_u_bnd) / 2
                basin_ias[int(i_maxima)][int(i_ias)] = int(basin_switch)
                converge_indices.append(i)  # Put in list to remove indices.
            else:
                # Store the new interval width in the step-size column.  The original
                # stored `None` here, which raises a TypeError when the row is
                # assigned back into the float64 `ias_indices` array.
                ias_indices[i] = [i_maxima, i_ang, new_l_bnd, new_u_bnd, new_ss, basin_switch, i_ias]

        # Remove rays that converged this iteration.
        ias_indices = np.delete(ias_indices, converge_indices, axis=0)

    return r_func, basin_ias
diff --git a/chemtools/topology/yu_trinkle.py b/chemtools/topology/yu_trinkle.py
new file mode 100644
index 00000000..bfdf2125
--- /dev/null
+++ b/chemtools/topology/yu_trinkle.py
@@ -0,0 +1,515 @@
+import numpy as np
+from scipy.sparse import lil_matrix
+from scipy.spatial import ConvexHull, Voronoi
+from scipy.spatial.distance import cdist
+from grid.cubic import UniformGrid, _HyperRectangleGrid
+
+
# Explicit public API: `from ... import *` exposes only the main entry point.
# The original `__all__ = [""]` broke star-imports — a module has no attribute "".
__all__ = ["qtaim"]
+
+class _BasinContainer(object):
+ __slots__ = ["basin", "numb_basins_found", "num_centers"]
+
+ def __init__(self, num_pts, num_centers=None):
+ self.numb_basins_found = 0
+ self.num_centers = 1 if num_centers is None else num_centers
+ self.basin = lil_matrix((num_pts, self.num_centers), dtype=np.float64)
+
+ def __getitem__(self, index):
+ # Get the basin values for the `index`th point based on the maximum.
+ # If the basin value returned is -1.0, it means it wasn't found yet.
+ arr = self.basin.getrow(index).toarray()[0] # [0] converts ndim to 1.
+ if any(x != 0.0 for x in arr):
+ # Plus one because basin values defined/starts at one.
+ return arr.argmax() + 1
+ return -1.0
+
+ def __setitem__(self, index, basin):
+ # This is the case of using Henklemenb/Bader's method on watershed points.
+ if isinstance(basin, (int, float, np.float64, np.int64)):
+ if basin > 0.0:
+ # Assign to the `index` point to basin number with one.
+ self.basin[index, int(basin) - 1] = 1.0
+ else:
+ raise ValueError(f"Basin value {basin} to the point {index} should be "
+ f"greater than zero.")
+ # This is the case of when you use Yu-trinkle algorithm on watershed points.
+ elif isinstance(basin, (list, np.ndarray)):
+ self.basin[index, :] = basin
+ else:
+ raise TypeError(f"Basin {type(basin)} should be a number of a float/list/array.")
+
+ def get_basins_from_indices(self, indices):
+ # Get the basins from the indices of the points removing all zero elements.
+ # FIXME : Add error exception if hte indices from watershed backtracing are outside the
+ # grid.
+ # This removes -1 because the __get_item__ returns -1.0 if it doesn't have any neighbors.
+ return {self.__getitem__(i) for i in indices} - {-1.0}
+
+ def get_basin_weights_of_points(self, indices):
+ # Given a set of point indices, get the basin weights for all weights
+ return self.basin[indices].toarray()
+
+ def increase_numb_atoms(self):
+ self.numb_basins_found += 1
+ # If the numb basins found is greater than num_centers (specified by user)
+ # then it resizes the sparse array.
+ if self.numb_basins_found > self.num_centers:
+ self.num_centers += 1
+ shape = self.basin.shape
+ # Resize is better than reshape as it changes it in-place.
+ self.basin.resize((shape[0], self.num_centers))
+
+
+def _get_area_of_coplanar_polygon(points):
+ # see math stackexchange: how-do-you-calculate-the-area-of-a-2d-polygon-in-3d
+ # points (M, 3) array, assumes the points all lie on a plane, i.e. coplanar
+ # this assumes that the point are ordered adjacently.
+ num_verts = points.shape[0]
+ center = np.sum(points, axis=0) / num_verts # get the center of points
+ area = 0
+ for i in range(num_verts):
+ v_i_plues_one = points[0] if i == num_verts - 1 else points[i + 1]
+ area += np.linalg.norm(np.cross(
+ (points[i] - center), (v_i_plues_one - center)
+ )) / 2.0
+ return area
+
+
def _get_area_of_voronoi_ridge(i_pt, i_nbh, index_to_voronoi_ridge, voronoi):
    r"""Return the area of the Voronoi ridge (facet) shared by two neighboring points.

    Parameters
    ----------
    i_pt, i_nbh : int
        Indices of the two neighboring points.
    index_to_voronoi_ridge : list[list[int]]
        Map from point index to the Voronoi ridges that point belongs to.
    voronoi : scipy.spatial.Voronoi
        The Voronoi diagram of the grid points.

    """
    # index_to_voronoi_ridge list of lists
    # find the row index r_{ij} in voronoi.ridge_points that contains (i, j)
    ith_voronoi_ridges = index_to_voronoi_ridge[i_pt]
    i_nbh_voronoi_ridges = index_to_voronoi_ridge[i_nbh]
    # Two neighboring points share exactly one ridge: the intersection of their ridge sets.
    ridge_index = (set(ith_voronoi_ridges) & set(i_nbh_voronoi_ridges))  # Take intersection
    assert len(ridge_index) == 1
    ridge_index = ridge_index.pop()
    # Get the voronoi vertices via : voronoi.vertices[delaunay.ridge_vertices[r_{ij}]].
    # voronoi.ridge_vertices[r_{ij}] makes sure it doesn't have -1 in it.
    ridge_vertices = voronoi.vertices[voronoi.ridge_vertices[ridge_index]]
    print("ridge vertices", ridge_vertices, "voronoi ridge vertices",
          voronoi.ridge_vertices[ridge_index])
    # A -1 vertex would mean the ridge extends to infinity (unbounded).
    assert -1 not in voronoi.ridge_vertices[ridge_index]
    # Get the area defined by the polygon of size 4, this assumes the polygon is coplanar,
    # i.e. lies on a plane. For a calculation of the formula see:
    # stack exchange article: how-do-you-calculate-the-area-of-a-2d-polygon-in-3d
    # return _get_area_of_coplanar_polygon(ridge_vertices)
    if len(ridge_vertices) <= 3:
        return _get_area_of_coplanar_polygon(ridge_vertices)
    # ConvexHull.area of a flat polygon in 3D counts both faces, hence the division by two.
    # "QJ" joggles the input to avoid qhull precision failures on degenerate facets.
    return ConvexHull(ridge_vertices, qhull_options="QJ").area / 2.0
+
+
def _assign_weight_yu_trinkle_voronoi(index, basin_cont, density_vals, voronoi,
                                      neighbors_index, index_to_voronoi_ridge):
    r"""Compute the fractional basin weights of a watershed point on a Voronoi grid.

    Implements the Yu-Trinkle flux formula: for every neighbor ``X``` with a larger
    density than the point ``X``, the flux-probability
    ``J_{X, X`} = a_{X, X`} (p_{X`} - p_{X}) / l_{X, X`}`` is accumulated, where
    ``a`` is the area of the shared Voronoi ridge and ``l`` the distance between the
    two points.  The point's weights are the flux-weighted average of the weights of
    its uphill neighbors.

    Parameters
    ----------
    index : int
        Index of the watershed point.
    basin_cont : _BasinContainer
        Container of the basin weights assigned so far.
    density_vals : ndarray(N,)
        Density value of every grid point.
    voronoi : scipy.spatial.Voronoi
        Voronoi diagram of the grid points.
    neighbors_index : list[int]
        Indices of the neighbors of the point.
    index_to_voronoi_ridge : list[list[int]]
        Map from point index to the Voronoi ridges it belongs to.

    Returns
    -------
    ndarray(num_centers,)
        Weights of the point over each basin; they sum to one.

    Raises
    ------
    AssertionError
        If no neighbor has a strictly larger density value (zero total flux).
    """
    total_fraction = 0.0
    weights = np.zeros((basin_cont.num_centers,))
    weights_nbhs = basin_cont.get_basin_weights_of_points(neighbors_index)
    # Go through each neighbour X` of the `index`th point X.
    for k, nbh_index in enumerate(neighbors_index):
        density_diff = density_vals[nbh_index] - density_vals[index]
        # Only look at neighbours X` whose density values is greater
        if 0.0 < density_diff:
            # Calc flux-probability: J_{X, X`} = a_{X, X`} (p_{X`} - p_{X}) / l_{X, X`}
            length = np.linalg.norm(voronoi.points[index] - voronoi.points[nbh_index])
            area = _get_area_of_voronoi_ridge(index, nbh_index, index_to_voronoi_ridge, voronoi)
            flux_probability = area * density_diff / length
            total_fraction += flux_probability
            # Calculate w^A(X) = \sum_{X`} J_{X, X`} w^A(X`)
            weights += flux_probability * weights_nbhs[k]

    # Validate BEFORE dividing: the original asserted after `weights /= total_fraction`,
    # producing nan weights (0/0) before the failure was ever reported.
    assert total_fraction != 0.0, "The neighbors most likely have the same density values, finer grid " \
                                  "which can avoid points with identical neighbors is recommended."
    weights /= total_fraction
    return weights
+
+
+def _assign_weight_yu_trinkle_cubic(index, basin_cont, density_vals, closest_nbh_indices,
+ uniform_grid, areas, grad_func=None):
+ total_fraction = 0.0
+ weights = np.zeros((basin_cont.num_centers,))
+ weights_nbhs = basin_cont.get_basin_weights_of_points(closest_nbh_indices)
+
+ print("basin weights of neighbors", weights_nbhs)
+ # Go through each neighbour X` of the `index`th point X.
+ for k, nbh_index in enumerate(closest_nbh_indices):
+ density_diff = density_vals[nbh_index] - density_vals[index]
+ print("Nbh Index ", nbh_index, "Nbh Point ", uniform_grid.points[nbh_index], "Density diff",
+ density_diff)
+ # Only look at neighbours X` whose density values is greater
+ if 0.0 < density_diff:
+ # Calc flux-probability: J_{X, X`} = a_{X, X`} (p_{X`} - p_{X}) / l_{X, X`}
+ length = np.linalg.norm(uniform_grid.points[index] - uniform_grid.points[nbh_index])
+ # area = _get_area_of_voronoi_ridge(index, nbh_index, index_to_voronoi_ridge, voronoi)
+ area = areas[k]
+ print("area", area, "length ", length)
+ if grad_func is None:
+ flux_probability = area * density_diff / length
+ else:
+ # calculate normal to the Voronoi Facet/Boundary
+ normal = uniform_grid.points[nbh_index] - uniform_grid.points[index]
+ normal /= length
+ midpoint = (uniform_grid.points[index] + uniform_grid.points[nbh_index]) / 2.0
+ flux_probability = area * normal.dot(grad_func(np.array([midpoint]))[0])
+ print("Flux Probability ", flux_probability)
+ total_fraction += flux_probability
+ print("Weight of neighbor", weights_nbhs[k])
+ # Calculate w^A(X) = \sum_{X`} J_{X, X`} w^A(X`)
+ weights += flux_probability * weights_nbhs[k]
+
+ weights /= total_fraction
+ return weights
+
+
+def _get_neighbor_and_ridge_information_from_voronoi(voronoi):
+ r"""
+ Voronoi data structure doesn't give a good data-structure to figure out which neighbors are
+ between a point in `points`. So here, we convert that based on the attribute
+ voronoi.ridge_points (Indices of the points between which each Voronoi ridge lies.).
+ Neighbours[i_pt]=[i_1, ..., i_n] gets the neighbors indices of the i_pt.
+ index_to_voronoi_ridge[i_pt] = [r_1, .., r_N] gives the voronoi ridges index r_k.
+ """
+ neighbors_indices = [[] for _ in range(0, voronoi.points.shape[0])]
+ index_to_voronoi_ridge = [[] for _ in range(0, voronoi.points.shape[0])]
+ for i_ridge, (x, y) in enumerate(voronoi.ridge_points):
+ neighbors_indices[x] += [y]
+ neighbors_indices[y] += [x]
+ index_to_voronoi_ridge[x] += [i_ridge]
+ index_to_voronoi_ridge[y] += [i_ridge]
+ return neighbors_indices, index_to_voronoi_ridge
+
+
+def _assign_weight_average_neighbors(index, basin_cont, voronoi, original_num_pts):
+ # QHull voronoi algorithm has trouble with corner points and finding neighbors
+ # stackoverflow: questions/25754145/scipy-voronoi-3d-not-all-ridge-points-are-shown .
+ # Solution:find the three closest points to this point and take the average of the
+ # weights to define the weight of this point. Three was chosen because a corner
+ # point in a rectangular grid has three neighbors.
+ print("QHull/Voronoi couldn't find neighbors, manually find average.")
+ distance = cdist(voronoi.points[index:index + 1], voronoi.points)[0]
+ min_index = distance.argsort()
+ min_index = np.delete(min_index, min_index >= original_num_pts)
+ min_index = min_index[1:4] # ignore first point cause it is itself
+ basin_wghts = basin_cont.get_basin_weights_of_points(min_index)
+ print("Average ", basin_wghts)
+ weights = np.average(basin_wghts, axis=0) # Take average
+ print("Average Weights", weights)
+ return weights
+
+
def voronoi_volumes(voronoi):
    r"""Return the volume of every Voronoi region; open regions get ``np.inf``."""
    def _region_volume(vertex_ids):
        # A -1 entry marks a vertex at infinity, i.e. an unbounded (open) region.
        if -1 in vertex_ids:
            return np.inf
        return ConvexHull(voronoi.vertices[vertex_ids]).volume

    # point_region maps each input point to the index of its region.
    return np.array([
        _region_volume(voronoi.regions[i_region]) for i_region in voronoi.point_region
    ])
+
+
+def close_neighbors_step():
+ # Given a point coordinate in 3D grid (i, j, k). Adding these give the coordinates of its
+ # close neighbors.
+ return np.array([
+ [-1, 0, 0],
+ [0, -1, 0],
+ [0, 0, 1],
+ [0, 0, -1],
+ [0, 1, 0],
+ [1, 0, 0]
+ ])
+
+
def close_diagonal_neighbors_step():
    r"""Offsets from a grid point (i, j, k) to its twelve edge-sharing neighbors.

    These are exactly the steps with two non-zero components, enumerated in
    lexicographic order (-1 < 0 < 1).
    """
    edge_steps = [
        (i, j, k)
        for i in (-1, 0, 1)
        for j in (-1, 0, 1)
        for k in (-1, 0, 1)
        if (i != 0) + (j != 0) + (k != 0) == 2
    ]
    return np.array(edge_steps)
+
+
def diagonal_neighbors_step():
    r"""Offsets from a grid point (i, j, k) to its eight corner-sharing neighbors.

    These are all sign combinations of (±1, ±1, ±1), in lexicographic order.
    """
    corner_steps = [
        (i, j, k)
        for i in (-1, 1)
        for j in (-1, 1)
        for k in (-1, 1)
    ]
    return np.array(corner_steps)
+
+
def _points_on_bounding_box(points, step_size=0.25, extension=0.01):
    r"""Get the points on the surface of a bounding box of a specified grid.

    Parameters
    ----------
    points : ndarray(N, 3)
        The points to enclose.
    step_size : float, optional
        Spacing between the generated points on each face of the box.
    extension : float, optional
        How far beyond the min/max of `points` each face of the box is placed.

    Returns
    -------
    ndarray
        The original points with the box-surface points appended (six faces).

    """
    # Place bounding box over the points
    l_bnd = np.min(points, axis=0)
    u_bnd = np.max(points, axis=0)

    # Compute the required number of points along x, y, and z axis
    shape = np.ceil((u_bnd - l_bnd + 2.0 * extension) / step_size)
    axes = np.eye(3) * step_size

    # construct x-y plane bottom and top
    coords = np.array(np.meshgrid(np.arange(shape[0] - 1), np.arange(shape[1] - 1), [0.]))
    coords = np.swapaxes(coords, 1, 2)
    coords = coords.reshape(3, -1)
    new_pts_bottom = coords.T.dot(axes) + np.array([l_bnd[0], l_bnd[1], l_bnd[2] - extension])
    points = np.vstack((points, new_pts_bottom))
    new_pts_up = coords.T.dot(axes) + np.array([l_bnd[0], l_bnd[1], u_bnd[2] + extension])
    points = np.vstack((points, new_pts_up))

    # construct y-z plane left and right
    coords = np.array(np.meshgrid([0.], np.arange(shape[1] - 1), np.arange(shape[2] - 1)))
    coords = np.swapaxes(coords, 1, 2)
    coords = coords.reshape(3, -1)
    new_pts_left = coords.T.dot(axes) + np.array([l_bnd[0] - extension, l_bnd[1], l_bnd[2]])
    points = np.vstack((points, new_pts_left))
    new_pts_right = coords.T.dot(axes) + np.array([u_bnd[0] + extension, l_bnd[1], l_bnd[2]])
    points = np.vstack((points, new_pts_right))

    # construct x-z plane towards and back
    coords = np.array(np.meshgrid(np.arange(shape[0] - 1), [0.], np.arange(shape[2] - 1)))
    coords = np.swapaxes(coords, 1, 2)
    coords = coords.reshape(3, -1)
    new_pts_down = coords.T.dot(axes) + np.array([l_bnd[0], l_bnd[1] - extension, l_bnd[2]])
    points = np.vstack((points, new_pts_down))
    new_pts_up = coords.T.dot(axes) + np.array([l_bnd[0], u_bnd[1] + extension, l_bnd[2]])
    points = np.vstack((points, new_pts_up))

    # NOTE(review): `indices` is unused; the assert only checks that no generated box
    # point coincides with an existing point.  `assert` is stripped under `python -O`.
    unique_pts, indices = np.unique(points, return_index=True, axis=0)
    assert unique_pts.shape == points.shape, "Bounding box is not unique."
    return points
+
+
def _get_neighbor_indices_for_cubic_grid(index, type, uniform_grid, return_area=False):
    r"""Get the indices of the neighbors of a point inside a cubic grid.

    Parameters
    ----------
    index : int
        Index of the point in the cubic grid.
    type : str
        Either "closest-neighbors" (the six face-sharing neighbors) or
        "all-neighbors" (face, corner and edge neighbors).  NOTE: the name shadows
        the `type` builtin; kept for backward compatibility with callers.
    uniform_grid : _HyperRectangleGrid
        The cubic grid holding the points.
    return_area : bool, optional
        If True, also return the facet area shared with each closest neighbor;
        only valid for type == "closest-neighbors".

    Returns
    -------
    list[int] or (list[int], ndarray)
        Indices of the neighbors that lie inside the grid and, if requested,
        the facet areas in the same order.

    """
    coord = uniform_grid.index_to_coordinates(index)
    if type == "closest-neighbors":
        nbh_coords = close_neighbors_step() + coord
    elif type == "all-neighbors":
        nbh_coords = np.vstack(
            (close_neighbors_step(), diagonal_neighbors_step(), close_diagonal_neighbors_step())
        ) + coord
    else:
        raise ValueError(f"Could not recognize {type}.")

    # A coordinate of -1 or equal to the grid shape falls outside the grid.
    nbh_coords = np.delete(nbh_coords, np.any(nbh_coords == -1, axis=1), axis=0)
    nbh_coords = np.delete(nbh_coords, np.any(nbh_coords == uniform_grid.shape, axis=1), axis=0)
    closest_nbh_indices = [uniform_grid.coordinates_to_index(x) for x in nbh_coords]
    if return_area:
        if type != "closest-neighbors":
            raise ValueError("`return_area` is true only when type == 'closest-neighbors'.")
        # Facet area between face neighbors: product of the two grid spacings
        # perpendicular to the step direction (the parallel spacing is zeroed
        # out by 1 - |step| and replaced by a factor of one).
        ss = np.array([np.linalg.norm(axis) for axis in uniform_grid.axes])
        ss = (1 - np.abs(close_neighbors_step())) * ss
        ss[ss == 0] = 1
        return closest_nbh_indices, np.prod(ss, axis=1)
    return closest_nbh_indices
+
+
def qtaim(grid_pts, density_vals, grad_func=None, num_centers=None,
          remove_duplicates=True, bounding_box=True):
    r"""
    Find basins from Yu-Trinkle algorithm on a cubic grid or a general grid using Voronoi diagrams.

    If a general grid is used, then problems may arise due to instabilities of constructing
    a Voronoi diagrams. Providing a cubic grid is more robust.

    Parameters
    ----------
    grid_pts: theochem/grid._HyperRectangleGrid or np.ndarray
        If it is the former, then it is a cubic grid and whose Voronoi diagrams is known.
        If it is an array of points then the Voronoi diagram is implemented.
    density_vals: ndarray
        Density values of each of the grid points.
    grad_func: Callable(ndarray(N,3) -> ndarray(N,3))
        The gradient of the density values. If provided, then it calculates the weights of
        the watershed points more accurately.
    num_centers: int, optional
        The number of centers/local maximas.
    remove_duplicates: bool
        If true, then it removes duplicates from `grid_pts`. This is due to construction
        of Voronoi diagrams.
    bounding_box: bool
        If true, then constructs a bounding box around the atom.

    Returns
    -------
    dict:
        Dictionary with the following keys:

        "basin_cont": CSC
            Sparse array (CSC format) with columns correspond to each basin.
        "maxima_indices": ndarray
            Array that holds which points correspond to the local maxima based on the grid.
        "watershed_indices": ndarray
            Array that holds indices of which points correspond to watershed points.
        "voronoi_volumes": ndarray or float
            Corresponds to the volume of the Voronoi diagram.

    Notes
    -----
    1) Sort based on density values
    2) Calculate the Voronoi Diagram
    3) Find and store all neighbors of each point using the Voronoi Diagram, may miss
       some points on boundary.
    4) Go through each point
       i) See if the neighbors of that point are assigned.
       ii) If no neighbours are assigned and it has neighbors then it is a maxima point
       iii) If no neighbors are assigned and no neighbor information is found, then assign
            its weight based on the average of its closest three points.
       iv) If all neighbors that were assigned, are assigned to a single basin, assign this
           to that basin.
       v) If some neighbors are assigned to different basins, then this point is a watershed point.
          and it solves using Yu-Trinkle method for the fractional points.

    """
    # Assert method values
    if not isinstance(grid_pts, (_HyperRectangleGrid, np.ndarray)):
        raise TypeError(f"Points should either be a numpy array or a UniformGrid object.")
    is_cubic_grid = isinstance(grid_pts, _HyperRectangleGrid)
    points = grid_pts if not is_cubic_grid else grid_pts.points

    if remove_duplicates:
        # Duplicate points break the Voronoi construction.
        points, indices = np.unique(points, return_index=True, axis=0)
        density_vals = density_vals[indices]

    original_num_pts = points.shape[0]
    if not is_cubic_grid:
        if bounding_box:
            points = _points_on_bounding_box(points, extension=0.25, step_size=0.1)
        voronoi = Voronoi(points=points, qhull_options="Qbb Qc Qz")
        assert np.all(
            np.abs(voronoi.points - points) < 1e-8), "Voronoi points should be the same as points."
        # neighbors_indices: mapping of point index to the neighbors using Voronoi diagram
        # index_to_voronoi_ridge: mapping of point index to the Voronoi ridges it's part of.
        neighbors_indices, index_to_voronoi_ridge = _get_neighbor_and_ridge_information_from_voronoi(
            voronoi
        )

    # num_centers speed calculations up dramatically due to sparsity structure
    num_centers = 1 if num_centers is None else num_centers
    maxima_indices = []  # Indices of the centers.
    watershed_indices = []  # indices of points that are on the boundary of the basin.
    basin_cont = _BasinContainer(original_num_pts, num_centers)
    # TODO: When density values are sorted, maybe group them based on similar values and pick
    # the ones closest to the points.
    sorted_density_indices = np.argsort(density_vals)[::-1]

    # Go through each point with the highest density values to the smallest.
    for i, index in enumerate(sorted_density_indices):
        print("Index ", index, " Point ", points[index], " Density value ", density_vals[index])
        if not is_cubic_grid:
            # Get the closest neighbor indices and remove those that are part of the bounding box.
            closest_nbh_indices = neighbors_indices[index]
            closest_nbh_indices = [i for i in closest_nbh_indices if i < original_num_pts]
            print("Voronoi ", voronoi.regions[voronoi.point_region[index]])
        else:
            closest_nbh_indices, areas = _get_neighbor_indices_for_cubic_grid(
                index, "closest-neighbors", grid_pts, return_area=True
            )
        # Get the basin-values of the closest points.
        basin_vals = basin_cont.get_basins_from_indices(closest_nbh_indices)
        print("Neighbours Indices ", closest_nbh_indices)
        print("Basin of neighbours ", basin_vals)
        # Closest neighbours were all not assigned yet a basin, means it is a maximum.
        if len(basin_vals) == 0:
            if len(closest_nbh_indices) > 0:  # If the neighbors were found
                found_maxima = not is_cubic_grid
                if is_cubic_grid and not found_maxima:
                    # Check all neighbors rather than close neighbors. Voronoi already checks all.
                    all_nbh_indices = _get_neighbor_indices_for_cubic_grid(
                        index, "all-neighbors", grid_pts
                    )
                    print("All neighbors", all_nbh_indices)
                    basin_vals = basin_cont.get_basins_from_indices(all_nbh_indices)
                    found_maxima = len(basin_vals) == 0

                if found_maxima:
                    print("Maximum found")
                    maxima_indices.append(index)
                    basin_cont.increase_numb_atoms()
                    basin_cont[index] = basin_cont.numb_basins_found
                    print("Number of basins founds ", basin_cont.numb_basins_found,
                          " Number Centers Specified At Beginning ", basin_cont.num_centers)
                elif len(basin_vals) == 1:
                    # (Cubic grid only)Diagonal element probably assigned, so assign it to that.
                    print("Point assigned to the basin of the neighbors")
                    basin_cont[index] = basin_vals.pop()
                else:
                    # TODO this case do watershed
                    # Most likely occurs due to exact density vals and sorting is unordered.
                    raise NotImplementedError("TODO")
            elif not is_cubic_grid:
                # Assign weight based on average of its neighbors due to problems with QHull.
                weights = _assign_weight_average_neighbors(
                    index, basin_cont, voronoi, original_num_pts
                )
                # assert the weights are not all zero
                if np.all(weights == 0.0):
                    print("weights ", weights)
                    raise RuntimeError("Weights are all zero")
                basin_cont[index] = weights

        # All neighbours were assigned to a single basin, assign this point to that basin.
        elif len(basin_vals) == 1:
            print("Point assigned to the basin of the neighbors")
            basin_cont[index] = basin_vals.pop()
        else:
            # It is assigned to multiple basins.
            print("Found watershed point")
            watershed_indices.append(index)
            # Consider the case it is a critical point, how do you check for this?
            # check for the gradient. Consider that one could assign a special criteria for this.
            if not is_cubic_grid:
                neighbor_index = neighbors_indices[index]
                neighbor_index = [i for i in neighbor_index if i < original_num_pts]
                weights = _assign_weight_yu_trinkle_voronoi(
                    index, basin_cont, density_vals, voronoi, neighbor_index,
                    index_to_voronoi_ridge
                )
            else:
                weights = _assign_weight_yu_trinkle_cubic(
                    index, basin_cont, density_vals, closest_nbh_indices, grid_pts, areas,
                    grad_func
                )

            print(weights)
            print("Sum of weights ", np.sum(weights))
            basin_cont[index] = weights
            if np.abs(np.sum(weights) - 1.0) > 1e-10:
                raise RuntimeError(
                    f"The Weights {weights} did not sum {np.sum(weights)} up to one."
                )
        print("")

    # TODO: Update watershed indices
    # Calculate Voronoi volumes
    volume = np.prod(np.array([np.linalg.norm(axis) for axis in grid_pts.axes])) if is_cubic_grid \
        else voronoi_volumes(voronoi)[:original_num_pts]
    return {"basin_cont": basin_cont.basin.tocsc(), "maxima_indices": maxima_indices,
            "watershed_indices": watershed_indices, "voronoi_volumes": volume}
\ No newline at end of file