Merged
2 changes: 2 additions & 0 deletions Project.toml
@@ -4,6 +4,7 @@ authors = ["Ben Corbett and contributors"]
version = "0.2.1"

[deps]
Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
FunctionWrappers = "069b7b12-0de2-55c6-9aab-29f3d0a68a2e"
ITensorMPS = "0d1a4710-d33b-49a5-8f18-73bdf49b47e2"
ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
@@ -15,6 +16,7 @@ ThreadsX = "ac1d9e8a-700a-412c-b207-f0111f4b6c0d"
TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"

[compat]
Dictionaries = "0.4.6"
FunctionWrappers = "1.1.3"
ITensorMPS = "0.1, 0.2, 0.3"
ITensors = "0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9"
24 changes: 13 additions & 11 deletions examples/electronic-structure.jl
@@ -151,11 +151,13 @@ for alg in ("VC", "QR")
) # TODO: remove splitblocks=true after warning
N > 5 && print_timer()

percent_sparse = round(100 * sparsity(H); digits=2)
println(
"The maximum bond dimension is $(maxlinkdim(H)), sparsity = $(ITensorMPOConstruction.sparsity(H))",
"The maximum bond dimension is $(maxlinkdim(H)), sparsity = $percent_sparse%",
)
@assert maxlinkdim(H) == 2 * N^2 + 3 * N + 2

GC.gc(true)
println()
end
end
@@ -167,16 +169,16 @@ nothing;
#
# | ``N`` | Minimum Vertex Cover | QR Decomposition |
# |-------|-------------------------|-----------------------|
# | 10 | 0.02s / 98.23% | 0.13s / 95.69% |
# | 20 | 0.90s / 98.84% | 1.20s / 97.01% |
# | 30 | 1.04s / 99.14% | 8.37s / 97.54% |
# | 40 | 3.30s / 99.32% | 16.3s / 97.72% |
# | 50 | 9.13s / 99.44% | 56.1s / 97.83% |
# | 60 | 21.1s / 99.52% | 169s / 97.90% |
# | 70 | 50.7s / 99.58% | 516s / 97.95% |
# | 80 | 106s / 99.63% | N/A |
# | 90 | 221s / 99.67% | N/A |
# | 100 | 744s / 99.70% | N/A |
# | 10 | 0.01s / 98.23% | 0.04s / 95.69% |
# | 20 | 0.81s / 98.84% | 0.54s / 97.01% |
# | 30 | 0.94s / 99.14% | 3.49s / 97.54% |
# | 40 | 2.88s / 99.32% | 14.4s / 97.73% |
# | 50 | 7.42s / 99.44% | 50.8s / 97.83% |
# | 60 | 17.2s / 99.52% | 159s / 97.90% |
# | 70 | 40.2s / 99.58% | 452s / 97.95% |
# | 80 | 95.0s / 99.63% | N/A |
# | 90 | 207s / 99.67% | N/A |
# | 100 | 739s / 99.70% | N/A |
#
# Not only does the vertex cover algorithm produce an MPO much faster than the QR algorithm, but the resulting MPO also has almost five times fewer nonzero entries (note: if `splitblocks=false` the sparsities of the two MPOs are equal). This story remains mostly unchanged when we construct Hamiltonians for real systems.
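As a rough illustration of where these sparsity figures come from, the fraction of exactly-zero entries in an MPO can be measured by densifying each site tensor and counting zeros. The sketch below is editorial and not part of this diff; `approx_sparsity` is a hypothetical stand-in, and the package's exported `sparsity` may be implemented differently.

using ITensors, ITensorMPS

# Fraction of exactly-zero entries across all site tensors of the MPO.
function approx_sparsity(H::MPO)
  num_zero = num_total = 0
  for T in H
    A = array(T)                  # densify the site tensor
    num_zero += count(iszero, A)
    num_total += length(A)
  end
  return num_zero / num_total
end

# Usage mirroring the example above: round(100 * approx_sparsity(H); digits=2)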

16 changes: 8 additions & 8 deletions examples/fermi-hubbard-ks.jl
@@ -83,14 +83,14 @@ end
# | ``N`` | ITensorMPS | ITensorMPOConstruction: QR |
# |-------|---------------|-----------------------------|
# | 10 | 0.32s / 92.7% | 0.01s / 99.32% |
# | 20 | 30.6s / 92.6% | 0.06s / 99.70% |
# | 30 | 792s / 92.6% | 0.53s / 99.81% |
# | 40 | N/A | 0.55s / 99.86% |
# | 50 | N/A | 0.38s / 99.89% |
# | 100 | N/A | 2.81s / 99.94% |
# | 200 | N/A | 30.3s / 99.97% |
# | 20 | 30.6s / 92.6% | 0.04s / 99.70% |
# | 30 | 792s / 92.6% | 0.45s / 99.81% |
# | 40 | N/A | 0.48s / 99.86% |
# | 50 | N/A | 0.29s / 99.89% |
# | 100 | N/A | 2.46s / 99.94% |
# | 200 | N/A | 39.1s / 99.97% |
# | 300 | N/A | 136s / 99.982% |
# | 400 | N/A | 415s / 99.986% |
# | 500 | N/A | 1274s / 99.989% |
# | 400 | N/A | 438s / 99.986% |
# | 500 | N/A | 1313s / 99.989% |
#
# Additionally, we constructed the MPO using the vertex cover algorithm, which turns out to be particularly ill-suited to this problem. Unlike the two other algorithms, the vertex cover algorithm produces MPOs of bond dimension ``1.5 N^2 + 4 N + 2``, far from the optimal of ``10 N - 4``. Granted, with `splitblocks=true` and `N = 100`, the MPO has a sparsity of 99.994%, but with a bond dimension of 15402 it still contains 25 times more nonzero entries than the MPO produced with the QR decomposition.
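As a quick arithmetic check on the figures quoted above (an editorial sketch, using only numbers already present in this comment):

# Bond dimensions at N = 100.
N = 100
vc_dim = round(Int, 1.5 * N^2 + 4 * N + 2)   # 15402, as quoted
qr_dim = 10 * N - 4                          # 996, the optimal bond dimension

# Rough per-link nonzero estimate: nnz ∝ dim^2 * (1 - sparsity). The physical
# indices contribute the same factor to both MPOs and are ignored here.
nnz_vc = vc_dim^2 * (1 - 0.99994)            # ≈ 1.4e4
nnz_qr = qr_dim^2 * (1 - 0.9994)             # ≈ 6.0e2
nnz_vc / nnz_qr                              # ≈ 24, consistent with the quoted 25x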
15 changes: 12 additions & 3 deletions examples/fermi-hubbard-tc.jl
@@ -389,7 +389,7 @@ end
using TimerOutputs
for grid_size in ((2, 2), (6, 6))
let t = 1, U = 4, J = -0.5, mapping = bipartite_mapping(grid_size)
@time "Constructing OpIDSum" sites, os = transcorrelated_fermi_hubbard(t, U, J, mapping)
@time "Constructing OpIDSum" sites, os = transcorrelated_fermi_hubbard(t, U, J, mapping; conserve_momentum=true)
reset_timer!()
@time "Constructing MPO" H = MPO_new(
os,
@@ -400,10 +400,19 @@ for grid_size in ((2, 2), (6, 6))
check_for_errors=false,
)

prod(grid_size) > 4 && print_timer()

percent_sparse = round(100 * sparsity(H); digits=3)
println(
"Constructed the MPO of bond dimension $(maxlinkdim(H)) and sparsity $(sparsity(H))"
"The maximum bond dimension is $(maxlinkdim(H)), sparsity = $percent_sparse%",
)
grid_size != (2, 2) && print_timer()

if iszero(J) && length(grid_size) == 1 && mapping == standard_mapping(grid_size)
@assert maxlinkdim(H) == 10 * N - 4
end

GC.gc(true)
println()
end
end

1 change: 1 addition & 0 deletions src/ITensorMPOConstruction.jl
@@ -5,6 +5,7 @@ module ITensorMPOConstruction
#
using ITensorMPS
using ITensors
using Dictionaries
using LinearAlgebra
using SparseArrays
using TimerOutputs
77 changes: 6 additions & 71 deletions src/large-graph-mpo-common.jl
@@ -1,16 +1,15 @@
"""
BlockSparseMatrix{C}

Dictionary-backed block-sparse matrix representation used for intermediate MPO
Vector-backed block-sparse matrix representation used for intermediate MPO
tensor storage.

Keys are `(left_link, right_link)` pairs and values are dense local operator
matrices for that block. The right link ids are local to the component being
processed; `at_site!` later returns offsets that place component-local
right-link ids into the full outgoing MPO bond.
The outer vector is indexed by component-local `right_link`. Each inner
`Dictionary` maps `left_link` to the dense local operator matrix for that block.
`at_site!` later returns offsets that place component-local right-link ids into
the full outgoing MPO bond.
"""
BlockSparseMatrix{C} = Dict{Tuple{Int,Int},Matrix{C}}
# TODO: consider changing to Vector{Dict{Int, Matrix{C}}} from right_link => (left_link => matrix) and use Dictionaries.jl
BlockSparseMatrix{C} = Vector{Dictionary{Int,Matrix{C}}}
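To make the indexing change concrete, here is a minimal editorial sketch (not part of this diff) of the new layout, assuming a local site dimension of 4:

using Dictionaries

C = Float64
# Outer Vector indexed by the component-local right link; each inner
# Dictionary maps a left link to its dense block.
matrix = [Dictionary{Int,Matrix{C}}() for _ in 1:3]  # three right links

# Fetch-or-create the block for left link 7, right link 2. Under the old
# Dict-of-tuples layout this was a single lookup keyed by (7, 2).
block = get!(matrix[2], 7) do
  zeros(C, 4, 4)
end
block[1, 1] += 1.0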

"""
MPOGraph{N,C,Ti}
@@ -332,70 +331,6 @@ function merge_qn_sectors(
return new_order, new_qi
end

"""
process_single_left_vertex_cc!(
matrix_of_cc, rank_of_cc, next_edges_of_cc, g, ccs, cc, n, sites, op_cache_vec
) -> Nothing

Handle the common connected-component case with exactly one left vertex.

This fills the local MPO tensor contribution for the component, records rank
`1`, and either applies the terminal scaling on the last site or builds the
outgoing edges for the next site. For nonterminal sites, the consumed left
adjacency list is cleared after the outgoing edges are built.
"""
function process_single_left_vertex_cc!(
matrix_of_cc::Vector{BlockSparseMatrix{ValType}},
rank_of_cc::Vector{Int},
next_edges_of_cc::Vector{Matrix{Tuple{Vector{Int},Vector{C}}}},
g::MPOGraph{N,C,Ti},
ccs::BipartiteGraphConnectedComponents,
cc::Int,
n::Int,
sites::Vector{<:Index},
op_cache_vec::OpCacheVec,
)::Nothing where {ValType<:Number,N,C,Ti}
lv_id = only(ccs.lvs_of_component[cc])
rank = 1
rank_of_cc[cc] = rank

lv = left_vertex(g, lv_id)
local_op = op_cache_vec[n][lv.op_id].matrix

matrix_element = get!(matrix_of_cc[cc], (lv.link, rank)) do
return zeros(ValType, dim(sites[n]), dim(sites[n]))
end

add_to_local_matrix!(matrix_element, one(ValType), local_op, lv.needs_JW_string)

if n == length(sites)
scaling = only(g.edge_weights_from_left[lv_id])

for block in values(matrix_of_cc[cc])
block .*= scaling
end

return nothing
end

next_edges = Matrix{Tuple{Vector{Int},Vector{C}}}(
undef, rank, length(op_cache_vec[n + 1])
)
for i in eachindex(next_edges)
next_edges[i] = (Int[], C[])
end

build_next_edges_specialization!(
next_edges, g, n, g.right_vertex_ids_from_left[lv_id], g.edge_weights_from_left[lv_id]
)

clear_edges_from_left!(g, lv_id)

next_edges_of_cc[cc] = next_edges

return nothing
end

"""
at_site!(ValType, g, n, sites, tol, absolute_tol, op_cache_vec, alg;
combine_qn_sectors, output_level=0)
77 changes: 73 additions & 4 deletions src/large-graph-mpo-qr.jl
@@ -80,6 +80,74 @@ function for_non_zeros_batch(
end
end

"""
process_single_left_vertex_cc!(
matrix_of_cc, rank_of_cc, next_edges_of_cc, g, ccs, cc, n, sites, op_cache_vec
) -> Nothing

Handle the common connected-component case with exactly one left vertex.

This fills the local MPO tensor contribution for the component, records rank
`1`, and either applies the terminal scaling on the last site or builds the
outgoing edges for the next site. For nonterminal sites, the consumed left
adjacency list is cleared after the outgoing edges are built.
"""
function process_single_left_vertex_cc!(
matrix_of_cc::Vector{BlockSparseMatrix{ValType}},
rank_of_cc::Vector{Int},
next_edges_of_cc::Vector{Matrix{Tuple{Vector{Int},Vector{C}}}},
g::MPOGraph{N,C,Ti},
ccs::BipartiteGraphConnectedComponents,
cc::Int,
n::Int,
sites::Vector{<:Index},
op_cache_vec::OpCacheVec,
)::Nothing where {ValType<:Number,N,C,Ti}
lv_id = only(ccs.lvs_of_component[cc])
rank = 1
rank_of_cc[cc] = rank

lv = left_vertex(g, lv_id)
local_op = op_cache_vec[n][lv.op_id].matrix

matrix = [Dictionary{Int,Matrix{ValType}}() for _ in 1:rank]
matrix_of_cc[cc] = matrix

matrix_element = get!(matrix[rank], lv.link) do
return zeros(ValType, dim(sites[n]), dim(sites[n]))
end

add_to_local_matrix!(matrix_element, one(ValType), local_op, lv.needs_JW_string)

if n == length(sites)
scaling = only(g.edge_weights_from_left[lv_id])

for column in matrix, block in values(column)
block .*= scaling
end

return nothing
end

next_edges = Matrix{Tuple{Vector{Int},Vector{C}}}(
undef, rank, length(op_cache_vec[n + 1])
)
for i in eachindex(next_edges)
next_edges[i] = (Int[], C[])
end

build_next_edges_specialization!(
next_edges, g, n, g.right_vertex_ids_from_left[lv_id], g.edge_weights_from_left[lv_id]
)

clear_edges_from_left!(g, lv_id)

next_edges_of_cc[cc] = next_edges

return nothing
end
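
Why rank 1 needs no decomposition here (an editorial illustration, not part of this diff): a component with a single left vertex has a weight matrix with a single row, so its rank is 1 by inspection.

using LinearAlgebra

W = [0.5 0.0 2.0]       # one left vertex connected to three right vertices
@assert rank(W) == 1    # no QR or vertex cover is needed to establish this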


"""
process_qr(
matrix_of_cc, rank_of_cc, next_edges_of_cc, g, ccs, n, sites, tol, absolute_tol, op_cache_vec
Expand Down Expand Up @@ -115,7 +183,6 @@ blocks.
continue
end

matrix = matrix_of_cc[cc]
W, left_map, right_map = get_cc_matrix(g, ccs, cc; clear_edges=true)

## Compute the decomposition and then free W
@@ -124,6 +191,9 @@

rank_of_cc[cc] = rank

matrix = [Dictionary{Int,Matrix{ValType}}() for _ in 1:rank]
matrix_of_cc[cc] = matrix

## Form the local transformation tensor.
for_non_zeros_batch(Q, rank) do weights::AbstractVector{C}, m::Int
for (i, weight) in enumerate(weights)
@@ -133,7 +203,7 @@
lv = left_vertex(g, left_map[prow[i]])
local_op = op_cache_vec[n][lv.op_id].matrix

matrix_element = get!(matrix, (lv.link, m)) do
matrix_element = get!(matrix[m], lv.link) do
return zeros(ValType, dim(sites[n]), dim(sites[n]))
end

@@ -148,7 +218,7 @@
## If we are at the last site, then R will be a 1x1 matrix containing an overall scaling.
if n == length(sites)
scaling = only(R)
for block in values(matrix)
for column in matrix, block in values(column)
block .*= scaling
end

@@ -208,4 +278,3 @@

return nothing
end

9 changes: 5 additions & 4 deletions src/large-graph-mpo-vertex-cover.jl
@@ -34,7 +34,6 @@ otherwise this also builds `next_edges_of_cc` for the graph at site `n + 1`.

# TODO: Consider nested multithreading
Threads.@threads for cc in 1:num_connected_components(ccs)
matrix = matrix_of_cc[cc]
lvs_of_component::Vector{Int} = ccs.lvs_of_component[cc]
position_of_rvs_in_component = ccs.position_of_rvs_in_component
rv_size_of_component = ccs.rv_size_of_component[cc]
@@ -45,6 +44,9 @@
rank = length(left_cover) + length(right_cover)
rank_of_cc[cc] = rank

matrix = [Dictionary{Int,Matrix{ValType}}() for _ in 1:rank]
matrix_of_cc[cc] = matrix

## Construct the tensor from the left cover.
@inbounds for m in eachindex(left_cover)
lv_id = lvs_of_component[left_cover[m]]
Expand All @@ -53,7 +55,7 @@ otherwise this also builds `next_edges_of_cc` for the graph at site `n + 1`.

matrix_element = zeros(ValType, site_dim, site_dim)
add_to_local_matrix!(matrix_element, one(ValType), local_op, lv.needs_JW_string)
matrix[lv.link, m] = matrix_element
set!(matrix[m], lv.link, matrix_element)
end

## Construct the tensor from the right cover.
@@ -85,7 +87,7 @@
for (rv_id, weight) in weighted_edge_iterator(g, lv_id)
m = right_cover_m[position_of_rvs_in_component[rv_id]]

matrix_element = get!(matrix, (lv.link, m)) do
matrix_element = get!(matrix[m], lv.link) do
zeros(ValType, site_dim, site_dim)
end

@@ -158,4 +160,3 @@

return nothing
end
