diff --git a/Project.toml b/Project.toml index edd2389e..fba18681 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ITensorNetworks" uuid = "2919e153-833c-4bdc-8836-1ea460a35fc7" authors = ["Matthew Fishman and contributors"] -version = "0.8.2" +version = "0.9.0" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" @@ -46,7 +46,7 @@ ITensorNetworksEinExprsExt = "EinExprs" AbstractTrees = "0.4.4" Combinatorics = "1" Compat = "3, 4" -DataGraphs = "0.1.13" +DataGraphs = "0.2.2" DataStructures = "0.18" Dictionaries = "0.4" Distributions = "0.25.86" @@ -54,11 +54,11 @@ DocStringExtensions = "0.8, 0.9" EinExprs = "0.6.4" Graphs = "1.8" GraphsFlows = "0.1.1" -ITensors = "0.3.58" +ITensors = "0.3.58, 0.4" IsApprox = "0.1" IterTools = "1.4.0" KrylovKit = "0.6, 0.7" -NamedGraphs = "0.1.23" +NamedGraphs = "0.5.1" NDTensors = "0.2, 0.3" Observers = "0.2" PackageExtensionCompat = "1" diff --git a/README.md b/README.md index 2f86bee0..629f281f 100644 --- a/README.md +++ b/README.md @@ -32,15 +32,13 @@ julia> ] add ITensorNetworks Here is an example of making a tensor network on a chain graph (a tensor train or matrix product state): ```julia -julia> using Graphs: neighbors +julia> using Graphs: neighbors, path_graph -julia> using ITensorNetworks: ITensorNetwork, siteinds +julia> using ITensorNetworks: ITensorNetwork -julia> using NamedGraphs: named_grid, subgraph - -julia> tn = ITensorNetwork(named_grid(4); link_space=2) +julia> tn = ITensorNetwork(path_graph(4); link_space=2) ITensorNetworks.ITensorNetwork{Int64} with 4 vertices: -4-element Vector{Int64}: +4-element Dictionaries.Indices{Int64} 1 2 3 @@ -89,9 +87,13 @@ julia> neighbors(tn, 4) and here is a similar example for making a tensor network on a grid (a tensor product state or projected entangled pair state (PEPS)): ```julia +julia> using NamedGraphs.GraphsExtensions: subgraph + +julia> using NamedGraphs.NamedGraphGenerators: named_grid + julia> tn = ITensorNetwork(named_grid((2, 2)); link_space=2) ITensorNetworks.ITensorNetwork{Tuple{Int64, Int64}} with 4 vertices: -4-element Vector{Tuple{Int64, Int64}}: +4-element Dictionaries.Indices{Tuple{Int64, Int64}} (1, 1) (2, 1) (1, 2) @@ -126,7 +128,7 @@ julia> neighbors(tn, (1, 2)) julia> tn_1 = subgraph(v -> v[1] == 1, tn) ITensorNetworks.ITensorNetwork{Tuple{Int64, Int64}} with 2 vertices: -2-element Vector{Tuple{Int64, Int64}}: +2-element Dictionaries.Indices{Tuple{Int64, Int64}} (1, 1) (1, 2) @@ -140,7 +142,7 @@ with vertex data: julia> tn_2 = subgraph(v -> v[1] == 2, tn) ITensorNetworks.ITensorNetwork{Tuple{Int64, Int64}} with 2 vertices: -2-element Vector{Tuple{Int64, Int64}}: +2-element Dictionaries.Indices{Tuple{Int64, Int64}} (2, 1) (2, 2) @@ -159,13 +161,13 @@ Networks can also be merged/unioned: ```julia julia> using ITensors: prime -julia> using ITensorNetworks: ⊗, contract, contraction_sequence +julia> using ITensorNetworks: ⊗, contract, contraction_sequence, siteinds julia> using ITensorUnicodePlots: @visualize julia> s = siteinds("S=1/2", named_grid(3)) ITensorNetworks.IndsNetwork{Int64, ITensors.Index} with 3 vertices: -3-element Vector{Int64}: +3-element Dictionaries.Indices{Int64} 1 2 3 @@ -185,7 +187,7 @@ and edge data: julia> tn1 = ITensorNetwork(s; link_space=2) ITensorNetworks.ITensorNetwork{Int64} with 3 vertices: -3-element Vector{Int64}: +3-element Dictionaries.Indices{Int64} 1 2 3 @@ -202,7 +204,7 @@ with vertex data: julia> tn2 = ITensorNetwork(s; link_space=2) ITensorNetworks.ITensorNetwork{Int64} with 3 vertices: -3-element Vector{Int64}: 
+3-element Dictionaries.Indices{Int64} 1 2 3 @@ -293,8 +295,8 @@ julia> @visualize Z; julia> contraction_sequence(Z) 2-element Vector{Vector}: - NamedGraphs.Key{Tuple{Int64, Int64}}[Key((1, 1)), Key((1, 2))] - Any[Key((2, 1)), Any[Key((2, 2)), NamedGraphs.Key{Tuple{Int64, Int64}}[Key((3, 1)), Key((3, 2))]]] + NamedGraphs.Keys.Key{Tuple{Int64, Int64}}[Key((1, 1)), Key((1, 2))] + Any[Key((2, 1)), Any[Key((2, 2)), NamedGraphs.Keys.Key{Tuple{Int64, Int64}}[Key((3, 1)), Key((3, 2))]]] julia> Z̃ = contract(Z, (1, 1) => (2, 1)); @@ -303,20 +305,20 @@ julia> @visualize Z̃; ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ - ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀Z̃(2, 1)⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ - ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀(2)'⠤⠤⠔⠒⠒⠉⠉⠀⠀⢱⠀⠈⠉⠑⠒⠢⠤⢄⣀2⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ - ⠀⠀⠀⠀⠀⠀⠀⣀⣀⠤⠤⠔⠒⠊⠉⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⡆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠉⠉⠒⠒⠤⠤⢄⣀⡀⠀⠀⠀⠀⠀ - ⠀Z̃(3, 1)⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢱⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀Z̃(1, 2)⠀⠀ - ⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⡆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⠔⠁⠀⠀⠀⠀⠀⠀⠀ - ⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀2⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣀⠔⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀ - ⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⡠2⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ - ⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⡠⠊⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ - ⠀⠀⠀⠀⠀⠀⠀2⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⢀⠤⠊⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ - ⠀⠀⠀⠀⠀⠀⠀⠀⢇⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀Z̃(2, 2)⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ - ⠀⠀⠀⠀⠀⠀⠀⠀⠸⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⡠⠤⠒⠊⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ - ⠀⠀⠀⠀⠀⠀⠀⠀⠀⢇⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⠤2⠒⠉⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ - ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠸⡀⠀⣀⡠⠤⠒⠊⠉⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ - ⠀⠀⠀⠀⠀⠀Z̃(3, 2)⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ + ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀Z̃(3, 1)⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ + ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⠔⠁⠀⠀⠀⠉⠉⠑⠒⠒⠢⠤⠤⣀⣀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ + ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⠔⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈2⠉⠑⠒⠒⠤⠤⠤⣀⣀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀ + ⠀⠀⠀⠀⠀⠀⠀⠀⠀(2)'⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀Z̃(3, 2)⠀⠀ + ⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⠔⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⠤⠊⠀⠀⠀⠀⠀⠀⠀ + ⠀⠀⠀⠀⠀⠀⠀⢀⠔⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡠⠒⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀ + ⠀⠀⠀⠀⠀⢀⠔⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀2⠊⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ + ⠀Z̃(2, 1)⠤⠤⣀⣀⣀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡠⠊⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ + ⠀⠀⠀⠀⠀⢣⠀⠀⠀⠀⠀⠀⠀⠀⠉⠉⠑⠒2⠢⠤⠤⣀⣀⡀⠀⠀⠀⠀⠀⠀⠀⣀⠔⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ + ⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠉Z̃(2, 2)⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ + ⠀⠀⠀⠀⠀2⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⠤⠤⠒⠊⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ + ⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⣀⡠⠤2⠒⠉⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ + ⠀⠀⠀⠀⠀⠀⢱⠀⢀⣀⠤⠔⠒⠊⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ + ⠀⠀Z̃(1, 2)⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ @@ -328,7 +330,7 @@ julia> @visualize Z̃; -This file was generated with [weave.jl](https://github.com/JunoLab/Weave.jl) with the following commands: +This file was generated with [Weave.jl](https://github.com/JunoLab/Weave.jl) with the following commands: ```julia using ITensorNetworks: ITensorNetworks diff --git a/examples/README.jl b/examples/README.jl index b9a3a43e..1b81df26 100644 --- a/examples/README.jl +++ b/examples/README.jl @@ -32,10 +32,9 @@ Random.seed!(ITensors.index_id_rng(), 1234); #' Here are is an example of making a tensor network on a chain graph (a tensor train or matrix product state): #+ term=true -using Graphs: neighbors -using ITensorNetworks: ITensorNetwork, siteinds -using NamedGraphs: named_grid, subgraph -tn = ITensorNetwork(named_grid(4); link_space=2) +using Graphs: neighbors, path_graph +using ITensorNetworks: ITensorNetwork +tn = ITensorNetwork(path_graph(4); link_space=2) tn[1] tn[2] neighbors(tn, 1) @@ -46,6 +45,8 @@ neighbors(tn, 4) #' and here is a similar example for making a tensor network on a grid (a tensor product state or project entangled pair state (PEPS)): #+ 
term=true +using NamedGraphs.GraphsExtensions: subgraph +using NamedGraphs.NamedGraphGenerators: named_grid tn = ITensorNetwork(named_grid((2, 2)); link_space=2) tn[1, 1] neighbors(tn, (1, 1)) @@ -57,7 +58,7 @@ tn_2 = subgraph(v -> v[1] == 2, tn) #+ term=true using ITensors: prime -using ITensorNetworks: ⊗, contract, contraction_sequence +using ITensorNetworks: ⊗, contract, contraction_sequence, siteinds using ITensorUnicodePlots: @visualize s = siteinds("S=1/2", named_grid(3)) tn1 = ITensorNetwork(s; link_space=2) @@ -72,7 +73,7 @@ Z̃ = contract(Z, (1, 1) => (2, 1)); #' ## Generating this README -#' This file was generated with [weave.jl](https://github.com/JunoLab/Weave.jl) with the following commands: +#' This file was generated with [Weave.jl](https://github.com/JunoLab/Weave.jl) with the following commands: #+ eval=false using ITensorNetworks: ITensorNetworks diff --git a/src/Graphs/abstractdatagraph.jl b/src/Graphs/abstractdatagraph.jl deleted file mode 100644 index bf75ab18..00000000 --- a/src/Graphs/abstractdatagraph.jl +++ /dev/null @@ -1,21 +0,0 @@ -using DataGraphs: DataGraphs, AbstractDataGraph, underlying_graph -using NamedGraphs: AbstractNamedGraph - -# TODO: we may want to move these to `DataGraphs.jl` -for f in [:_root, :_is_rooted, :_is_rooted_directed_binary_tree] - @eval begin - function $f(graph::AbstractDataGraph, args...; kwargs...) - return $f(underlying_graph(graph), args...; kwargs...) - end - end -end - -DataGraphs.edge_data_type(::AbstractNamedGraph) = Any - -Base.isassigned(::AbstractNamedGraph, ::Any) = false - -function Base.iterate(::AbstractDataGraph) - return error( - "Iterating data graphs is not yet defined. We may define it in the future as iterating through the vertex and edge data.", - ) -end diff --git a/src/Graphs/abstractgraph.jl b/src/Graphs/abstractgraph.jl deleted file mode 100644 index c170be58..00000000 --- a/src/Graphs/abstractgraph.jl +++ /dev/null @@ -1,71 +0,0 @@ -using Graphs: AbstractGraph, IsDirected, a_star -using NamedGraphs: child_vertices, undirected_graph -using SimpleTraits: @traitfn - -"""Determine if an edge involves a leaf (at src or dst)""" -function is_leaf_edge(g::AbstractGraph, e) - return is_leaf(g, src(e)) || is_leaf(g, dst(e)) -end - -"""Determine if a node has any neighbors which are leaves""" -function has_leaf_neighbor(g::AbstractGraph, v) - for vn in neighbors(g, v) - if (is_leaf(g, vn)) - return true - end - end - return false -end - -"""Get all edges which do not involve a leaf""" -function internal_edges(g::AbstractGraph) - return filter(e -> !is_leaf_edge(g, e), edges(g)) -end - -"""Get distance of a vertex from a leaf""" -function distance_to_leaf(g::AbstractGraph, v) - leaves = leaf_vertices(g) - if (isempty(leaves)) - println("ERROR: GRAPH DOES NTO CONTAIN LEAVES") - return NaN - end - - return minimum([length(a_star(g, v, leaf)) for leaf in leaves]) -end - -"""Return all vertices which are within a certain pathlength `dist` of the leaves of the graph""" -function distance_from_roots(g::AbstractGraph, dist::Int64) - return vertices(g)[findall(<=(dist), [distance_to_leaf(g, v) for v in vertices(g)])] -end - -""" -Return the root vertex of a rooted directed graph -""" -@traitfn function _root(graph::AbstractGraph::IsDirected) - @assert _is_rooted(graph) "the input $(graph) has to be rooted" - v = vertices(graph)[1] - while parent_vertex(graph, v) != nothing - v = parent_vertex(graph, v) - end - return v -end - -@traitfn function _is_rooted(graph::AbstractGraph::IsDirected) - roots = [v for v in vertices(graph) if 
parent_vertex(graph, v) == nothing] - return length(roots) == 1 -end - -@traitfn function _is_rooted_directed_binary_tree(graph::AbstractGraph::IsDirected) - if !_is_rooted(graph) - return false - end - if !is_tree(undirected_graph(graph)) - return false - end - for v in vertices(graph) - if length(child_vertices(graph, v)) > 2 - return false - end - end - return true -end diff --git a/src/ITensorNetworks.jl b/src/ITensorNetworks.jl index d908924e..6184cd77 100644 --- a/src/ITensorNetworks.jl +++ b/src/ITensorNetworks.jl @@ -1,11 +1,9 @@ module ITensorNetworks -include("usings.jl") -include("Graphs/abstractgraph.jl") -include("Graphs/abstractdatagraph.jl") +include("lib/BaseExtensions/src/BaseExtensions.jl") +include("lib/ITensorsExtensions/src/ITensorsExtensions.jl") include("observers.jl") include("visualize.jl") include("graphs.jl") -include("itensors.jl") include("abstractindsnetwork.jl") include("indextags.jl") include("indsnetwork.jl") @@ -15,16 +13,15 @@ include("abstractitensornetwork.jl") include("contraction_sequences.jl") include("tebd.jl") include("itensornetwork.jl") -include("mincut.jl") -include("contract_deltas.jl") -include("approx_itensornetwork/utils.jl") -include("approx_itensornetwork/density_matrix.jl") -include("approx_itensornetwork/ttn_svd.jl") -include("approx_itensornetwork/approx_itensornetwork.jl") -include("approx_itensornetwork/partition.jl") -include("approx_itensornetwork/binary_tree_partition.jl") +include("contract_approx/mincut.jl") +include("contract_approx/contract_deltas.jl") +include("contract_approx/utils.jl") +include("contract_approx/density_matrix.jl") +include("contract_approx/ttn_svd.jl") +include("contract_approx/contract_approx.jl") +include("contract_approx/partition.jl") +include("contract_approx/binary_tree_partition.jl") include("contract.jl") -include("utility.jl") include("specialitensornetworks.jl") include("boundarymps.jl") include("partitioneditensornetwork.jl") @@ -36,14 +33,13 @@ include("caches/beliefpropagationcache.jl") include("contraction_tree_to_graph.jl") include("gauging.jl") include("utils.jl") -include("ITensorsExtensions/ITensorsExtensions.jl") include("solvers/local_solvers/eigsolve.jl") include("solvers/local_solvers/exponentiate.jl") include("solvers/local_solvers/dmrg_x.jl") include("solvers/local_solvers/contract.jl") include("solvers/local_solvers/linsolve.jl") include("treetensornetworks/abstracttreetensornetwork.jl") -include("treetensornetworks/ttn.jl") +include("treetensornetworks/treetensornetwork.jl") include("treetensornetworks/opsum_to_ttn.jl") include("treetensornetworks/projttns/abstractprojttn.jl") include("treetensornetworks/projttns/projttn.jl") @@ -66,8 +62,8 @@ include("inner.jl") include("expect.jl") include("environment.jl") include("exports.jl") -include("ModelHamiltonians/ModelHamiltonians.jl") -include("ModelNetworks/ModelNetworks.jl") +include("lib/ModelHamiltonians/src/ModelHamiltonians.jl") +include("lib/ModelNetworks/src/ModelNetworks.jl") using PackageExtensionCompat: @require_extensions using Requires: @require diff --git a/src/abstractindsnetwork.jl b/src/abstractindsnetwork.jl index d87fefc4..d25f5abb 100644 --- a/src/abstractindsnetwork.jl +++ b/src/abstractindsnetwork.jl @@ -1,8 +1,10 @@ using ITensors: IndexSet -using DataGraphs: DataGraphs, AbstractDataGraph, edge_data, edge_data_type, vertex_data +using DataGraphs: DataGraphs, AbstractDataGraph, edge_data, vertex_data using Graphs: Graphs, AbstractEdge using ITensors: ITensors, unioninds, uniqueinds -using NamedGraphs: NamedGraphs, 
incident_edges, rename_vertices +using .ITensorsExtensions: ITensorsExtensions, promote_indtype +using NamedGraphs: NamedGraphs +using NamedGraphs.GraphsExtensions: incident_edges, rename_vertices abstract type AbstractIndsNetwork{V,I} <: AbstractDataGraph{V,Vector{I},Vector{I}} end @@ -21,7 +23,7 @@ function DataGraphs.edge_data(graph::AbstractIndsNetwork, args...) end # TODO: Define a generic fallback for `AbstractDataGraph`? -DataGraphs.edge_data_type(::Type{<:AbstractIndsNetwork{V,I}}) where {V,I} = Vector{I} +DataGraphs.edge_data_eltype(::Type{<:AbstractIndsNetwork{V,I}}) where {V,I} = Vector{I} ## TODO: Bring these back. ## function indsnetwork_getindex(is::AbstractIndsNetwork, index) @@ -95,7 +97,7 @@ end # Convenience functions # -function promote_indtypeof(is::AbstractIndsNetwork) +function ITensorsExtensions.promote_indtypeof(is::AbstractIndsNetwork) sitetype = mapreduce(promote_indtype, vertices(is); init=Index{Int}) do v # TODO: Replace with `is[v]` once `getindex(::IndsNetwork, ...)` is smarter. return mapreduce(typeof, promote_indtype, get(is, v, Index[]); init=Index{Int}) diff --git a/src/abstractitensornetwork.jl b/src/abstractitensornetwork.jl index 958a5845..59c3804a 100644 --- a/src/abstractitensornetwork.jl +++ b/src/abstractitensornetwork.jl @@ -37,18 +37,12 @@ using ITensors: sim, swaptags using ITensors.ITensorMPS: ITensorMPS, add, linkdim, linkinds, siteinds -using ITensors.ITensorVisualizationCore: ITensorVisualizationCore, visualize +using .ITensorsExtensions: ITensorsExtensions, indtype, promote_indtype using LinearAlgebra: LinearAlgebra, factorize using NamedGraphs: - NamedGraphs, - NamedGraph, - ⊔, - directed_graph, - incident_edges, - not_implemented, - rename_vertices, - vertex_to_parent_vertex, - vertextype + NamedGraphs, NamedGraph, not_implemented, parent_vertex_to_vertex, vertex_to_parent_vertex +using NamedGraphs.GraphsExtensions: + ⊔, directed_graph, incident_edges, rename_vertices, vertextype using NDTensors: NDTensors, dim using SplitApplyCombine: flatten @@ -59,7 +53,7 @@ data_graph_type(::Type{<:AbstractITensorNetwork}) = not_implemented() data_graph(graph::AbstractITensorNetwork) = not_implemented() # TODO: Define a generic fallback for `AbstractDataGraph`? -DataGraphs.edge_data_type(::Type{<:AbstractITensorNetwork}) = ITensor +DataGraphs.edge_data_eltype(::Type{<:AbstractITensorNetwork}) = ITensor # Graphs.jl overloads function Graphs.weights(graph::AbstractITensorNetwork) @@ -103,6 +97,9 @@ DataGraphs.underlying_graph(tn::AbstractITensorNetwork) = underlying_graph(data_ function NamedGraphs.vertex_to_parent_vertex(tn::AbstractITensorNetwork, vertex) return vertex_to_parent_vertex(underlying_graph(tn), vertex) end +function NamedGraphs.parent_vertex_to_vertex(tn::AbstractITensorNetwork, parent_vertex) + return parent_vertex_to_vertex(underlying_graph(tn), parent_vertex) +end # # Iteration @@ -167,28 +164,23 @@ function Base.setindex!(tn::AbstractITensorNetwork, value, v) return tn end -# Convert to a collection of ITensors (`Vector{ITensor}`). 
-function Base.Vector{ITensor}(tn::AbstractITensorNetwork) - return [tn[v] for v in vertices(tn)] -end - # Convenience wrapper -function tensors(tn::AbstractITensorNetwork, vertices=vertices(tn)) - return map(v -> tn[v], Indices(vertices)) +function eachtensor(tn::AbstractITensorNetwork, vertices=vertices(tn)) + return map(v -> tn[v], vertices) end # # Promotion and conversion # -function promote_indtypeof(tn::AbstractITensorNetwork) - return mapreduce(promote_indtype, tensors(tn)) do t +function ITensorsExtensions.promote_indtypeof(tn::AbstractITensorNetwork) + return mapreduce(promote_indtype, eachtensor(tn)) do t return indtype(t) end end function NDTensors.scalartype(tn::AbstractITensorNetwork) - return mapreduce(eltype, promote_type, tensors(tn); init=Bool) + return mapreduce(eltype, promote_type, eachtensor(tn); init=Bool) end # TODO: Define `eltype(::AbstractITensorNetwork)` as `ITensor`? @@ -246,8 +238,8 @@ function ITensorMPS.siteinds(tn::AbstractITensorNetwork) end function flatten_siteinds(tn::AbstractITensorNetwork) - # reduce(noncommoninds, tensors(tn)) - return unique(flatten([uniqueinds(tn, v) for v in vertices(tn)])) + # `identity.(...)` narrows the type, maybe there is a better way. + return identity.(flatten(map(v -> siteinds(tn, v), vertices(tn)))) end function ITensorMPS.linkinds(tn::AbstractITensorNetwork) @@ -259,7 +251,8 @@ function ITensorMPS.linkinds(tn::AbstractITensorNetwork) end function flatten_linkinds(tn::AbstractITensorNetwork) - return unique(flatten([commoninds(tn, e) for e in edges(tn)])) + # `identity.(...)` narrows the type, maybe there is a better way. + return identity.(flatten(map(e -> linkinds(tn, e), edges(tn)))) end # @@ -267,13 +260,12 @@ end # function neighbor_tensors(tn::AbstractITensorNetwork, vertex) - return tensors(tn, neighbors(tn, vertex)) + return eachtensor(tn, neighbors(tn, vertex)) end function ITensors.uniqueinds(tn::AbstractITensorNetwork, vertex) - # TODO: Splatting here isn't good, make a version that works for - # collections of ITensors. - return reduce(uniqueinds, Iterators.flatten(([tn[vertex]], neighbor_tensors(tn, vertex)))) + tn_vertex = [tn[vertex]; collect(neighbor_tensors(tn, vertex))] + return reduce(setdiff, inds.(tn_vertex)) end function ITensors.uniqueinds(tn::AbstractITensorNetwork, edge::AbstractEdge) @@ -625,7 +617,11 @@ function neighbor_vertices(ψ::AbstractITensorNetwork, T::ITensor) end function linkinds_combiners(tn::AbstractITensorNetwork; edges=edges(tn)) - combiners = DataGraph(directed_graph(underlying_graph(tn)), ITensor, ITensor) + combiners = DataGraph( + directed_graph(underlying_graph(tn)); + vertex_data_eltype=ITensor, + edge_data_eltype=ITensor, + ) for e in edges C = combiner(linkinds(tn, e); tags=edge_tag(e)) combiners[e] = C @@ -741,6 +737,7 @@ Base.show(io::IO, graph::AbstractITensorNetwork) = show(io, MIME"text/plain"(), # TODO: Move to an `ITensorNetworksVisualizationInterfaceExt` # package extension (and define a `VisualizationInterface` package # based on `ITensorVisualizationCore`.). +using ITensors.ITensorVisualizationCore: ITensorVisualizationCore, visualize function ITensorVisualizationCore.visualize( tn::AbstractITensorNetwork, args...; @@ -751,7 +748,8 @@ function ITensorVisualizationCore.visualize( if !isnothing(vertex_labels_prefix) vertex_labels = [vertex_labels_prefix * string(v) for v in vertices(tn)] end - return visualize(Vector{ITensor}(tn), args...; vertex_labels, kwargs...) + # TODO: Use `tokenize_vertex`. 
+ return visualize(collect(eachtensor(tn)), args...; vertex_labels, kwargs...) end # @@ -776,7 +774,9 @@ function ITensorMPS.linkdim(tn::AbstractITensorNetwork{V}, edge::AbstractEdge{V} end function ITensorMPS.linkdims(tn::AbstractITensorNetwork{V}) where {V} - ld = DataGraph{V,Any,Int}(copy(underlying_graph(tn))) + ld = DataGraph{V}( + copy(underlying_graph(tn)); vertex_data_eltype=Nothing, edge_data_eltype=Int + ) for e in edges(ld) ld[e] = linkdim(tn, e) end diff --git a/src/apply.jl b/src/apply.jl index 9405e550..71c5637d 100644 --- a/src/apply.jl +++ b/src/apply.jl @@ -1,3 +1,4 @@ +using .BaseExtensions: maybe_real using Graphs: has_edge using LinearAlgebra: qr using ITensors: Ops diff --git a/src/caches/beliefpropagationcache.jl b/src/caches/beliefpropagationcache.jl index 282b9ee8..475a8e02 100644 --- a/src/caches/beliefpropagationcache.jl +++ b/src/caches/beliefpropagationcache.jl @@ -1,12 +1,17 @@ using Graphs: IsDirected using SplitApplyCombine: group -using NamedGraphs: unpartitioned_graph -using NamedGraphs: partitionvertices -using NamedGraphs: PartitionVertex using LinearAlgebra: diag using ITensors: dir using ITensors.ITensorMPS: ITensorMPS -using NamedGraphs: boundary_partitionedges, partitionvertices, partitionedges +using NamedGraphs.PartitionedGraphs: + PartitionedGraphs, + PartitionedGraph, + PartitionVertex, + boundary_partitionedges, + partitionvertices, + partitionedges, + unpartitioned_graph +using SimpleTraits: SimpleTraits, Not, @traitfn default_message(inds_e) = ITensor[denseblocks(delta(i)) for i in inds_e] default_messages(ptn::PartitionedGraph) = Dictionary() @@ -77,11 +82,11 @@ end #Forward from partitioned graph for f in [ - :(NamedGraphs.partitioned_graph), - :(NamedGraphs.partitionedge), - :(NamedGraphs.partitionvertices), - :(NamedGraphs.vertices), - :(NamedGraphs.boundary_partitionedges), + :(PartitionedGraphs.partitioned_graph), + :(PartitionedGraphs.partitionedge), + :(PartitionedGraphs.partitionvertices), + :(PartitionedGraphs.vertices), + :(PartitionedGraphs.boundary_partitionedges), :(ITensorMPS.linkinds), ] @eval begin @@ -99,10 +104,8 @@ function message(bp_cache::BeliefPropagationCache, edge::PartitionEdge) mts = messages(bp_cache) return get(mts, edge, default_message(bp_cache, edge)) end -function messages( - bp_cache::BeliefPropagationCache, edges::Vector{<:PartitionEdge}; kwargs... -) - return [message(bp_cache, edge; kwargs...) for edge in edges] +function messages(bp_cache::BeliefPropagationCache, edges; kwargs...) 
+ return map(edge -> message(bp_cache, edge; kwargs...), edges) end function Base.copy(bp_cache::BeliefPropagationCache) @@ -129,7 +132,7 @@ end function environment( bp_cache::BeliefPropagationCache, partition_vertices::Vector{<:PartitionVertex}; - ignore_edges=PartitionEdge[], + ignore_edges=(), ) bpes = boundary_partitionedges(bp_cache, partition_vertices; dir=:in) ms = messages(bp_cache, setdiff(bpes, ignore_edges)) @@ -153,7 +156,7 @@ end function factor(bp_cache::BeliefPropagationCache, vertex::PartitionVertex) ptn = partitioned_tensornetwork(bp_cache) - return Vector{ITensor}(subgraph(ptn, vertex)) + return collect(eachtensor(subgraph(ptn, vertex))) end """ @@ -250,21 +253,18 @@ end """ Update the tensornetwork inside the cache """ -function update_factors( - bp_cache::BeliefPropagationCache, vertices::Vector, factors::Vector{ITensor} -) +function update_factors(bp_cache::BeliefPropagationCache, factors) bp_cache = copy(bp_cache) tn = tensornetwork(bp_cache) - - for (vertex, factor) in zip(vertices, factors) + for vertex in eachindex(factors) # TODO: Add a check that this preserves the graph structure. - setindex_preserve_graph!(tn, factor, vertex) + setindex_preserve_graph!(tn, factors[vertex], vertex) end return bp_cache end function update_factor(bp_cache, vertex, factor) - return update_factors(bp_cache, [vertex], ITensor[factor]) + return update_factors(bp_cache, Dictionary([vertex], [factor])) end function region_scalar(bp_cache::BeliefPropagationCache, pv::PartitionVertex) @@ -279,16 +279,15 @@ end function vertex_scalars( bp_cache::BeliefPropagationCache, - pvs::Vector=partitionvertices(partitioned_tensornetwork(bp_cache)), + pvs=partitionvertices(partitioned_tensornetwork(bp_cache)), ) - return [region_scalar(bp_cache, pv) for pv in pvs] + return map(pv -> region_scalar(bp_cache, pv), pvs) end function edge_scalars( - bp_cache::BeliefPropagationCache, - pes::Vector=partitionedges(partitioned_tensornetwork(bp_cache)), + bp_cache::BeliefPropagationCache, pes=partitionedges(partitioned_tensornetwork(bp_cache)) ) - return [region_scalar(bp_cache, pe) for pe in pes] + return map(pe -> region_scalar(bp_cache, pe), pes) end function scalar_factors_quotient(bp_cache::BeliefPropagationCache) diff --git a/src/contract.jl b/src/contract.jl index a5f3fdd7..c0229849 100644 --- a/src/contract.jl +++ b/src/contract.jl @@ -9,10 +9,17 @@ function NDTensors.contract(tn::AbstractITensorNetwork; alg="exact", kwargs...) end function NDTensors.contract( - alg::Algorithm"exact", tn::AbstractITensorNetwork; sequence=vertices(tn), kwargs... + alg::Algorithm"exact", + tn::AbstractITensorNetwork; + contraction_sequence_kwargs=(;), + sequence=contraction_sequence(tn; contraction_sequence_kwargs...), + kwargs..., ) + # TODO: Use `vertex`. sequence_linear_index = deepmap(v -> vertex_to_parent_vertex(tn, v), sequence) - return contract(Vector{ITensor}(tn); sequence=sequence_linear_index, kwargs...) + # TODO: Use `tokenized_vertex`. + ts = map(pv -> tn[parent_vertex_to_vertex(tn, pv)], 1:nv(tn)) + return contract(ts; sequence=sequence_linear_index, kwargs...) end function NDTensors.contract( @@ -21,7 +28,7 @@ function NDTensors.contract( output_structure::Function=path_graph_structure, kwargs..., ) - return approx_tensornetwork(alg, tn, output_structure; kwargs...) + return contract_approx(alg, tn, output_structure; kwargs...) end function ITensors.scalar(alg::Algorithm, tn::AbstractITensorNetwork; kwargs...) 
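The `src/contract.jl` hunk above changes the exact-contraction entry point so that `sequence` now defaults to `contraction_sequence(tn; contraction_sequence_kwargs...)`, and routes the approximate algorithms through the renamed `contract_approx` (formerly `approx_tensornetwork`). A minimal sketch of what calling code might look like after this change; the graph shape, `"S=1/2"` sites, and `maxdim` value are illustrative assumptions, not taken from this diff:

```julia
using Graphs: path_graph
using ITensorNetworks: ITensorNetwork, contract, eachtensor, siteinds

s = siteinds("S=1/2", path_graph(4))
tn = ITensorNetwork(s; link_space=2)

# Exact contraction: a contraction sequence is now computed by default,
# but can still be passed explicitly via `sequence`.
t = contract(tn; alg="exact")

# Approximate contraction dispatches to `contract_approx` and returns
# the truncated network along with the log of the root norm.
tn_approx, log_norm = contract(tn; alg="ttn_svd", maxdim=32)

# `collect(eachtensor(tn))` replaces the removed `Vector{ITensor}(tn)`
# conversion seen throughout the hunks above.
ts = collect(eachtensor(tn))
```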
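Similarly, the `src/caches/beliefpropagationcache.jl` hunks above loosen several signatures (`messages`, `vertex_scalars`, and `edge_scalars` now accept generic iterables rather than concrete `Vector`s), and `update_factors` takes a single keyed collection instead of parallel vertex and factor vectors. A sketch of the new convention, where `bp_cache`, the vertices `v1`/`v2`, and the tensors `f1`/`f2` are hypothetical placeholders:

```julia
using Dictionaries: Dictionary
using ITensorNetworks: update_factors

# Old signature: update_factors(bp_cache, [v1, v2], ITensor[f1, f2])
# New signature: one collection of factors, indexed by vertex.
bp_cache = update_factors(bp_cache, Dictionary([v1, v2], [f1, f2]))
```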
diff --git a/src/approx_itensornetwork/binary_tree_partition.jl b/src/contract_approx/binary_tree_partition.jl similarity index 81% rename from src/approx_itensornetwork/binary_tree_partition.jl rename to src/contract_approx/binary_tree_partition.jl index b6657c19..9c269317 100644 --- a/src/approx_itensornetwork/binary_tree_partition.jl +++ b/src/contract_approx/binary_tree_partition.jl @@ -1,20 +1,27 @@ -using NamedGraphs: pre_order_dfs_vertices using DataGraphs: DataGraph -using ITensors: Index, ITensor, delta, noncommoninds, replaceinds, sim +using ITensors: Index, ITensor, delta, replaceinds, sim using ITensors.NDTensors: Algorithm, @Algorithm_str -using NamedGraphs: disjoint_union, rename_vertices, subgraph +using NamedGraphs.GraphsExtensions: + disjoint_union, + is_binary_arborescence, + is_leaf_vertex, + pre_order_dfs_vertices, + rename_vertices, + root_vertex, + subgraph function _binary_partition(tn::ITensorNetwork, source_inds::Vector{<:Index}) - external_inds = noncommoninds(Vector{ITensor}(tn)...) + external_inds = flatten_siteinds(tn) # add delta tensor to each external ind external_sim_ind = [sim(ind) for ind in external_inds] tn = map_data(t -> replaceinds(t, external_inds => external_sim_ind), tn; edges=[]) tn_wo_deltas = rename_vertices(v -> v[1], subgraph(v -> v[2] == 1, tn)) - deltas = Vector{ITensor}(subgraph(v -> v[2] == 2, tn)) + deltas = collect(eachtensor(subgraph(v -> v[2] == 2, tn))) scalars = rename_vertices(v -> v[1], subgraph(v -> v[2] == 3, tn)) new_deltas = [ delta(external_inds[i], external_sim_ind[i]) for i in 1:length(external_inds) ] + # TODO: Combine in a more elegant way, so with `disjoint_union`. deltas = [deltas..., new_deltas...] tn = disjoint_union(tn_wo_deltas, ITensorNetwork(deltas), scalars) p1, p2 = _mincut_partition_maxweightoutinds( @@ -22,8 +29,8 @@ function _binary_partition(tn::ITensorNetwork, source_inds::Vector{<:Index}) ) source_tn = _contract_deltas(subgraph(tn, p1)) remain_tn = _contract_deltas(subgraph(tn, p2)) - outinds_source = noncommoninds(Vector{ITensor}(source_tn)...) - outinds_remain = noncommoninds(Vector{ITensor}(remain_tn)...) + outinds_source = flatten_siteinds(source_tn) + outinds_remain = flatten_siteinds(remain_tn) common_inds = intersect(outinds_source, outinds_remain) @assert ( length(external_inds) == @@ -68,20 +75,20 @@ Note: name of vertices in the output partition are the same as the name of verti function _partition( ::Algorithm"mincut_recursive_bisection", tn::ITensorNetwork, inds_btree::DataGraph ) - @assert _is_rooted_directed_binary_tree(inds_btree) - output_tns = Vector{ITensorNetwork}() + @assert is_binary_arborescence(inds_btree) + output_tns = Vector{ITensorNetwork{vertextype(tn)}}() output_deltas_vector = Vector{Vector{ITensor}}() scalars_vector = Vector{Vector{ITensor}}() # Mapping each vertex of the binary tree to a tn representing the partition # of the subtree containing this vertex and its descendant vertices. 
leaves = leaf_vertices(inds_btree) - root = _root(inds_btree) - v_to_subtree_tn = Dict{vertextype(inds_btree),ITensorNetwork}() - v_to_subtree_tn[root] = disjoint_union(tn, ITensorNetwork()) + root = root_vertex(inds_btree) + v_to_subtree_tn = Dict{eltype(inds_btree),ITensorNetwork{Tuple{vertextype(tn),Int}}}() + v_to_subtree_tn[root] = disjoint_union(tn, ITensorNetwork{vertextype(tn)}()) for v in pre_order_dfs_vertices(inds_btree, root) @assert haskey(v_to_subtree_tn, v) input_tn = v_to_subtree_tn[v] - if !is_leaf(inds_btree, v) + if !is_leaf_vertex(inds_btree, v) c1, c2 = child_vertices(inds_btree, v) descendant_c1 = pre_order_dfs_vertices(inds_btree, c1) indices = [inds_btree[l] for l in intersect(descendant_c1, leaves)] @@ -93,26 +100,26 @@ function _partition( v_to_subtree_tn[c2] = tn1 end tn = rename_vertices(u -> u[1], subgraph(u -> u[2] == 1, input_tn)) - deltas = Vector{ITensor}(subgraph(u -> u[2] == 2, input_tn)) - scalars = Vector{ITensor}(subgraph(u -> u[2] == 3, input_tn)) + deltas = collect(eachtensor(subgraph(u -> u[2] == 2, input_tn))) + scalars = collect(eachtensor(subgraph(u -> u[2] == 3, input_tn))) push!(output_tns, tn) push!(output_deltas_vector, deltas) push!(scalars_vector, scalars) end # In subgraph_vertices, each element is a vector of vertices to be # grouped in one partition. - subgraph_vs = Vector{Vector{Tuple}}() + subgraph_vs = Vector{Vector{Tuple{vertextype(tn),Int}}}() delta_num = 0 scalar_num = 0 for (tn, deltas, scalars) in zip(output_tns, output_deltas_vector, scalars_vector) - vs = Vector{Tuple}([(v, 1) for v in vertices(tn)]) + vs = [(v, 1) for v in vertices(tn)] vs = vcat(vs, [(i + delta_num, 2) for i in 1:length(deltas)]) vs = vcat(vs, [(i + scalar_num, 3) for i in 1:length(scalars)]) push!(subgraph_vs, vs) delta_num += length(deltas) scalar_num += length(scalars) end - out_tn = ITensorNetwork() + out_tn = ITensorNetwork{vertextype(tn)}() for tn in output_tns for v in vertices(tn) add_vertex!(out_tn, v) @@ -123,11 +130,11 @@ function _partition( tn_scalars = ITensorNetwork(vcat(scalars_vector...)) par = _partition(disjoint_union(out_tn, tn_deltas, tn_scalars), subgraph_vs) @assert is_tree(par) - name_map = Dict() + name_map = Dict{Int,vertextype(tn)}() for (i, v) in enumerate(pre_order_dfs_vertices(inds_btree, root)) name_map[i] = v end - return rename_vertices(par, name_map) + return rename_vertices(v -> name_map[v], par) end function _partition(tn::ITensorNetwork, inds_btree::DataGraph; alg) diff --git a/src/approx_itensornetwork/approx_itensornetwork.jl b/src/contract_approx/contract_approx.jl similarity index 59% rename from src/approx_itensornetwork/approx_itensornetwork.jl rename to src/contract_approx/contract_approx.jl index 2b8f8518..dc6a9b44 100644 --- a/src/approx_itensornetwork/approx_itensornetwork.jl +++ b/src/contract_approx/contract_approx.jl @@ -1,21 +1,22 @@ +using NamedGraphs.GraphsExtensions: is_binary_arborescence, root_vertex + # Density matrix algorithm and ttn_svd algorithm """ Approximate a `binary_tree_partition` into an output ITensorNetwork with the same binary tree structure. `root` is the root vertex of the pre-order depth-first-search traversal used to perform the truncations. 
""" -function approx_tensornetwork( +function contract_approx( ::Algorithm"density_matrix", binary_tree_partition::DataGraph; root, cutoff=1e-15, maxdim=10000, - contraction_sequence_alg, contraction_sequence_kwargs, ) @assert is_tree(binary_tree_partition) @assert root in vertices(binary_tree_partition) - @assert _is_rooted_directed_binary_tree(dfs_tree(binary_tree_partition, root)) + @assert is_binary_arborescence(dfs_tree(binary_tree_partition, root)) # The `binary_tree_partition` may contain multiple delta tensors to make sure # the partition has a binary tree structure. These delta tensors could hurt the # performance when computing density matrices so we remove them first. @@ -28,30 +29,23 @@ function approx_tensornetwork( root, cutoff, maxdim, - contraction_sequence_alg, contraction_sequence_kwargs, ) end -function approx_tensornetwork( +function contract_approx( ::Algorithm"ttn_svd", binary_tree_partition::DataGraph; root, cutoff=1e-15, maxdim=10000, - contraction_sequence_alg, contraction_sequence_kwargs, ) @assert is_tree(binary_tree_partition) @assert root in vertices(binary_tree_partition) - @assert _is_rooted_directed_binary_tree(dfs_tree(binary_tree_partition, root)) + @assert is_binary_arborescence(dfs_tree(binary_tree_partition, root)) return _approx_itensornetwork_ttn_svd!( - binary_tree_partition; - root, - cutoff, - maxdim, - contraction_sequence_alg, - contraction_sequence_kwargs, + binary_tree_partition; root, cutoff, maxdim, contraction_sequence_kwargs ) end @@ -60,34 +54,22 @@ Approximate a given ITensorNetwork `tn` into an output ITensorNetwork with a binary tree structure. The binary tree structure is defined based on `inds_btree`, which is a directed binary tree DataGraph of indices. """ -function approx_tensornetwork( +function contract_approx( alg::Union{Algorithm"density_matrix",Algorithm"ttn_svd"}, tn::ITensorNetwork, inds_btree::DataGraph; cutoff=1e-15, maxdim=10000, - contraction_sequence_alg="optimal", contraction_sequence_kwargs=(;), ) par = _partition(tn, inds_btree; alg="mincut_recursive_bisection") - output_tn, log_root_norm = approx_tensornetwork( - alg, - par; - root=_root(inds_btree), - cutoff=cutoff, - maxdim=maxdim, - contraction_sequence_alg=contraction_sequence_alg, - contraction_sequence_kwargs=contraction_sequence_kwargs, + output_tn, log_root_norm = contract_approx( + alg, par; root=root_vertex(inds_btree), cutoff, maxdim, contraction_sequence_kwargs ) # Each leaf vertex in `output_tn` is adjacent to one output index. # We remove these leaf vertices so that each non-root vertex in `output_tn` # is an order 3 tensor. - _rem_leaf_vertices!( - output_tn; - root=_root(inds_btree), - contraction_sequence_alg=contraction_sequence_alg, - contraction_sequence_kwargs=contraction_sequence_kwargs, - ) + _rem_leaf_vertices!(output_tn; root=root_vertex(inds_btree), contraction_sequence_kwargs) return output_tn, log_root_norm end @@ -95,84 +77,54 @@ end Approximate a given ITensorNetwork `tn` into an output ITensorNetwork with `output_structure`. `output_structure` outputs a directed binary tree DataGraph defining the desired graph structure. 
""" -function approx_tensornetwork( +function contract_approx( alg::Union{Algorithm"density_matrix",Algorithm"ttn_svd"}, tn::ITensorNetwork, output_structure::Function=path_graph_structure; cutoff=1e-15, maxdim=10000, - contraction_sequence_alg="optimal", contraction_sequence_kwargs=(;), ) inds_btree = output_structure(tn) - return approx_tensornetwork( - alg, - tn, - inds_btree; - cutoff=cutoff, - maxdim=maxdim, - contraction_sequence_alg=contraction_sequence_alg, - contraction_sequence_kwargs=contraction_sequence_kwargs, - ) + return contract_approx(alg, tn, inds_btree; cutoff, maxdim, contraction_sequence_kwargs) end # interface -function approx_tensornetwork( +function contract_approx( partitioned_tn::DataGraph; alg::String, root, cutoff=1e-15, maxdim=10000, - contraction_sequence_alg="optimal", contraction_sequence_kwargs=(;), ) - return approx_tensornetwork( - Algorithm(alg), - partitioned_tn; - root, - cutoff, - maxdim, - contraction_sequence_alg, - contraction_sequence_kwargs, + return contract_approx( + Algorithm(alg), partitioned_tn; root, cutoff, maxdim, contraction_sequence_kwargs ) end -function approx_tensornetwork( +function contract_approx( tn::ITensorNetwork, inds_btree::DataGraph; alg::String, cutoff=1e-15, maxdim=10000, - contraction_sequence_alg="optimal", contraction_sequence_kwargs=(;), ) - return approx_tensornetwork( - Algorithm(alg), - tn, - inds_btree; - cutoff, - maxdim, - contraction_sequence_alg, - contraction_sequence_kwargs, + return contract_approx( + Algorithm(alg), tn, inds_btree; cutoff, maxdim, contraction_sequence_kwargs ) end -function approx_tensornetwork( +function contract_approx( tn::ITensorNetwork, output_structure::Function=path_graph_structure; alg::String, cutoff=1e-15, maxdim=10000, - contraction_sequence_alg="optimal", contraction_sequence_kwargs=(;), ) - return approx_tensornetwork( - Algorithm(alg), - tn, - output_structure; - cutoff, - maxdim, - contraction_sequence_alg, - contraction_sequence_kwargs, + return contract_approx( + Algorithm(alg), tn, output_structure; cutoff, maxdim, contraction_sequence_kwargs ) end diff --git a/src/contract_deltas.jl b/src/contract_approx/contract_deltas.jl similarity index 90% rename from src/contract_deltas.jl rename to src/contract_approx/contract_deltas.jl index 818044b0..6dfc61bf 100644 --- a/src/contract_deltas.jl +++ b/src/contract_approx/contract_deltas.jl @@ -1,5 +1,6 @@ -using ITensors.NDTensors: ind using DataStructures: DataStructures, DisjointSets, find_root! +using ITensors.NDTensors: ind +using .ITensorsExtensions: is_delta """ Rewrite of the function @@ -85,26 +86,26 @@ Example: 4 │ ((dim=2|id=626|"6"), (dim=2|id=237|"5")) """ function _contract_deltas(tn::ITensorNetwork) - network = Vector{ITensor}(tn) - deltas = filter(t -> is_delta(t), network) - if deltas == [] + deltas = filter(is_delta, collect(eachtensor(tn))) + if isempty(deltas) return tn end tn = copy(tn) - outinds = noncommoninds(network...) + outinds = flatten_siteinds(tn) ds = _delta_inds_disjointsets(deltas, outinds) deltainds = [ds...] sim_deltainds = [find_root!(ds, i) for i in deltainds] # `rem_vertex!(tn, v)` changes `vertices(tn)` in place. # We copy it here so that the enumeration won't be affected. 
- for v in copy(vertices(tn)) + vs = copy(vertices(tn)) + for v in vs if !is_delta(tn[v]) tn[v] = replaceinds(tn[v], deltainds, sim_deltainds) continue end i1, i2 = inds(tn[v]) root = find_root!(ds, i1) - @assert root === find_root!(ds, i2) + @assert root == find_root!(ds, i2) if i1 != root && i1 in outinds tn[v] = delta(i1, root) elseif i2 != root && i2 in outinds @@ -133,7 +134,7 @@ function _contract_deltas_ignore_leaf_partitions( nonleaves = setdiff(vertices(partition), leaves) rootinds = _noncommoninds(subgraph(partition, nonleaves)) # check rootinds are not noncommoninds of the partition - @assert intersect(rootinds, _noncommoninds(partition)) == [] + @assert isempty(intersect(rootinds, _noncommoninds(partition))) nonleaves_tn = _contract_deltas(reduce(union, [partition[v] for v in nonleaves])) nondelta_vs = filter(v -> !is_delta(nonleaves_tn[v]), vertices(nonleaves_tn)) for v in nonleaves @@ -142,18 +143,18 @@ function _contract_deltas_ignore_leaf_partitions( # Note: we also need to change inds in the leaves since they can be connected by deltas # in nonleaf vertices delta_vs = setdiff(vertices(nonleaves_tn), nondelta_vs) - if delta_vs == [] + if isempty(delta_vs) return partition end ds = _delta_inds_disjointsets( Vector{ITensor}(subgraph(nonleaves_tn, delta_vs)), Vector{Index}() ) - deltainds = [ds...] - sim_deltainds = [find_root!(ds, ind) for ind in deltainds] + deltainds = Index[ds...] + sim_deltainds = Index[find_root!(ds, ind) for ind in deltainds] for tn_v in leaves - partition[tn_v] = map_data( - t -> replaceinds(t, deltainds, sim_deltainds), partition[tn_v]; edges=[] - ) + partition[tn_v] = map_data(partition[tn_v]; edges=[]) do t + return replaceinds(t, deltainds, sim_deltainds) + end end return partition end diff --git a/src/approx_itensornetwork/density_matrix.jl b/src/contract_approx/density_matrix.jl similarity index 86% rename from src/approx_itensornetwork/density_matrix.jl rename to src/contract_approx/density_matrix.jl index 32e7c30b..9e745a8b 100644 --- a/src/approx_itensornetwork/density_matrix.jl +++ b/src/contract_approx/density_matrix.jl @@ -1,4 +1,9 @@ -using LinearAlgebra: ishermitian +using DataGraphs: DataGraph +using Graphs: vertices +using ITensors: ITensor, inds +using LinearAlgebra: ishermitian, norm +using NamedGraphs: NamedEdge +using NamedGraphs.GraphsExtensions: child_vertices, parent_vertex, post_order_dfs_vertices """ The struct contains cached density matrices and cached partial density matrices @@ -146,7 +151,7 @@ end function _get_low_rank_projector(tensor, inds1, inds2; cutoff, maxdim) @assert length(inds(tensor)) <= 4 - F = eigen(tensor, inds1, inds2; cutoff=cutoff, maxdim=maxdim, ishermitian=true) + F = eigen(tensor, inds1, inds2; cutoff, maxdim, ishermitian=true) return F.Vt end @@ -155,7 +160,7 @@ Returns a dict that maps the partition's outinds that are adjacent to `partition """ function _densitymatrix_outinds_to_sim(partition, root) outinds = _noncommoninds(partition) - outinds_root = intersect(outinds, noncommoninds(Vector{ITensor}(partition[root])...)) + outinds_root = intersect(outinds, flatten_siteinds(partition[root])) outinds_root_to_sim = Dict(zip(outinds_root, [sim(ind) for ind in outinds_root])) return outinds_root_to_sim end @@ -191,7 +196,6 @@ function _update!( children::Vector, network::Vector{ITensor}, inds_to_sim; - contraction_sequence_alg, contraction_sequence_kwargs, ) v = dst(edge) @@ -204,30 +208,22 @@ function _update!( es = [NamedEdge(src_v, v) for src_v in setdiff(children, child_v)] es = Set(vcat(es, [edge])) 
if !haskey(caches.es_to_pdm, es) - caches.es_to_pdm[es] = _optcontract( - [dm_tensor, network...]; contraction_sequence_alg, contraction_sequence_kwargs - ) + caches.es_to_pdm[es] = _optcontract([dm_tensor; network]; contraction_sequence_kwargs) end push!(pdms, caches.es_to_pdm[es]) end if length(pdms) == 0 sim_network = map(x -> replaceinds(x, inds_to_sim), network) sim_network = map(dag, sim_network) - density_matrix = _optcontract( - [network..., sim_network...]; contraction_sequence_alg, contraction_sequence_kwargs - ) + density_matrix = _optcontract([network; sim_network]; contraction_sequence_kwargs) elseif length(pdms) == 1 sim_network = map(x -> replaceinds(x, inds_to_sim), network) sim_network = map(dag, sim_network) - density_matrix = _optcontract( - [pdms[1], sim_network...]; contraction_sequence_alg, contraction_sequence_kwargs - ) + density_matrix = _optcontract([pdms[1]; sim_network]; contraction_sequence_kwargs) else simtensor = _sim(pdms[2], inds_to_sim) simtensor = dag(simtensor) - density_matrix = _optcontract( - [pdms[1], simtensor]; contraction_sequence_alg, contraction_sequence_kwargs - ) + density_matrix = _optcontract([pdms[1], simtensor]; contraction_sequence_kwargs) end caches.e_to_dm[edge] = density_matrix return nothing @@ -259,12 +255,7 @@ Example: and the returned tensor `U` will be the projector at vertex 4 in the output tn. """ function _rem_vertex!( - alg_graph::_DensityMartrixAlgGraph, - root; - cutoff, - maxdim, - contraction_sequence_alg, - contraction_sequence_kwargs, + alg_graph::_DensityMartrixAlgGraph, root; cutoff, maxdim, contraction_sequence_kwargs ) caches = alg_graph.caches outinds_root_to_sim = _densitymatrix_outinds_to_sim(alg_graph.partition, root) @@ -274,14 +265,13 @@ function _rem_vertex!( for v in post_order_dfs_vertices(dm_dfs_tree, root) children = sort(child_vertices(dm_dfs_tree, v)) @assert length(children) <= 2 - network = Vector{ITensor}(alg_graph.partition[v]) + network = collect(eachtensor(alg_graph.partition[v])) _update!( caches, NamedEdge(parent_vertex(dm_dfs_tree, v), v), children, - Vector{ITensor}(network), + network, inds_to_sim; - contraction_sequence_alg, contraction_sequence_kwargs, ) end @@ -294,9 +284,7 @@ function _rem_vertex!( ) # update partition and out_tree root_tensor = _optcontract( - [Vector{ITensor}(alg_graph.partition[root])..., dag(U)]; - contraction_sequence_alg, - contraction_sequence_kwargs, + [collect(eachtensor(alg_graph.partition[root])); dag(U)]; contraction_sequence_kwargs ) new_root = child_vertices(dm_dfs_tree, root)[1] alg_graph.partition[new_root] = disjoint_union( @@ -319,9 +307,7 @@ function _rem_vertex!( end @assert length(new_es) >= 1 caches.es_to_pdm[new_es] = _optcontract( - [caches.es_to_pdm[es], root_tensor]; - contraction_sequence_alg, - contraction_sequence_kwargs, + [caches.es_to_pdm[es], root_tensor]; contraction_sequence_kwargs ) end # Remove old caches since they won't be used anymore, @@ -345,12 +331,11 @@ function _approx_itensornetwork_density_matrix!( root=first(vertices(partition)), cutoff=1e-15, maxdim=10000, - contraction_sequence_alg, contraction_sequence_kwargs, ) # Change type of each partition[v] since they will be updated # with potential data type chage. 
- partition = DataGraph() + partition = DataGraph(NamedGraph()) for v in vertices(input_partition) add_vertex!(partition, v) partition[v] = ITensorNetwork{Any}(input_partition[v]) @@ -359,16 +344,14 @@ function _approx_itensornetwork_density_matrix!( alg_graph = _DensityMartrixAlgGraph(partition, out_tree, root) output_tn = ITensorNetwork() for v in post_order_dfs_vertices(out_tree, root)[1:(end - 1)] - U = _rem_vertex!( - alg_graph, v; cutoff, maxdim, contraction_sequence_alg, contraction_sequence_kwargs - ) + U = _rem_vertex!(alg_graph, v; cutoff, maxdim, contraction_sequence_kwargs) add_vertex!(output_tn, v) output_tn[v] = U end @assert length(vertices(partition)) == 1 add_vertex!(output_tn, root) root_tensor = _optcontract( - Vector{ITensor}(partition[root]); contraction_sequence_alg, contraction_sequence_kwargs + collect(eachtensor(partition[root])); contraction_sequence_kwargs ) root_norm = norm(root_tensor) root_tensor /= root_norm diff --git a/src/mincut.jl b/src/contract_approx/mincut.jl similarity index 88% rename from src/mincut.jl rename to src/contract_approx/mincut.jl index fdb9d290..995a5e14 100644 --- a/src/mincut.jl +++ b/src/contract_approx/mincut.jl @@ -11,14 +11,14 @@ MAX_WEIGHT = 1e32 Outputs a maximimally unbalanced directed binary tree DataGraph defining the desired graph structure """ function path_graph_structure(tn::ITensorNetwork) - return path_graph_structure(tn, noncommoninds(Vector{ITensor}(tn)...)) + return path_graph_structure(tn, flatten_siteinds(tn)) end """ Given a `tn` and `outinds` (a subset of noncommoninds of `tn`), outputs a maximimally unbalanced directed binary tree DataGraph of `outinds` defining the desired graph structure """ -function path_graph_structure(tn::ITensorNetwork, outinds::Vector{<:Index}) +function path_graph_structure(tn::ITensorNetwork, outinds::Vector) return _binary_tree_structure(tn, outinds; maximally_unbalanced=true) end @@ -26,14 +26,14 @@ end Outputs a directed binary tree DataGraph defining the desired graph structure """ function binary_tree_structure(tn::ITensorNetwork) - return binary_tree_structure(tn, noncommoninds(Vector{ITensor}(tn)...)) + return binary_tree_structure(tn, flatten_siteinds(tn)) end """ Given a `tn` and `outinds` (a subset of noncommoninds of `tn`), outputs a directed binary tree DataGraph of `outinds` defining the desired graph structure """ -function binary_tree_structure(tn::ITensorNetwork, outinds::Vector{<:Index}) +function binary_tree_structure(tn::ITensorNetwork, outinds::Vector) return _binary_tree_structure(tn, outinds; maximally_unbalanced=false) end @@ -43,12 +43,10 @@ Calculate the mincut between two subsets of the uncontracted inds Mincut of two inds list is defined as the mincut of two newly added vertices, each one neighboring to one inds subset. """ -function _mincut( - tn::ITensorNetwork, source_inds::Vector{<:Index}, terminal_inds::Vector{<:Index} -) +function _mincut(tn::ITensorNetwork, source_inds::Vector, terminal_inds::Vector) @assert length(source_inds) >= 1 @assert length(terminal_inds) >= 1 - noncommon_inds = noncommoninds(Vector{ITensor}(tn)...) + noncommon_inds = flatten_siteinds(tn) @assert issubset(source_inds, noncommon_inds) @assert issubset(terminal_inds, noncommon_inds) tn = disjoint_union( @@ -61,9 +59,7 @@ end Calculate the mincut_partitions between two subsets of the uncontracted inds (source_inds and terminal_inds) of the input tn. 
""" -function _mincut_partitions( - tn::ITensorNetwork, source_inds::Vector{<:Index}, terminal_inds::Vector{<:Index} -) +function _mincut_partitions(tn::ITensorNetwork, source_inds::Vector, terminal_inds::Vector) p1, p2, cut = _mincut(tn, source_inds, terminal_inds) p1 = [v[1] for v in p1 if v[2] == 2] p2 = [v[1] for v in p2 if v[2] == 2] @@ -71,7 +67,7 @@ function _mincut_partitions( end function _mincut_partition_maxweightoutinds( - tn::ITensorNetwork, source_inds::Vector{<:Index}, terminal_inds::Vector{<:Index} + tn::ITensorNetwork, source_inds::Vector, terminal_inds::Vector ) tn, out_to_maxweight_ind = _maxweightoutinds_tn(tn, [source_inds..., terminal_inds...]) source_inds = [out_to_maxweight_ind[i] for i in source_inds] @@ -82,9 +78,9 @@ end """ Sum of shortest path distances among all outinds. """ -function _distance(tn::ITensorNetwork, outinds::Vector{<:Index}) +function _distance(tn::ITensorNetwork, outinds::Vector) @assert length(outinds) >= 1 - @assert issubset(outinds, noncommoninds(Vector{ITensor}(tn)...)) + @assert issubset(outinds, flatten_siteinds(tn)) if length(outinds) == 1 return 0.0 end @@ -105,8 +101,8 @@ create a tn with empty ITensors whose outinds weights are MAX_WEIGHT The maxweight_tn is constructed so that only commoninds of the tn will be considered in mincut. """ -function _maxweightoutinds_tn(tn::ITensorNetwork, outinds::Union{Nothing,Vector{<:Index}}) - @assert issubset(outinds, noncommoninds(Vector{ITensor}(tn)...)) +function _maxweightoutinds_tn(tn::ITensorNetwork, outinds::Union{Nothing,Vector}) + @assert issubset(outinds, flatten_siteinds(tn)) out_to_maxweight_ind = Dict{Index,Index}() for ind in outinds out_to_maxweight_ind[ind] = Index(MAX_WEIGHT, ind.tags) @@ -132,7 +128,7 @@ Example: # TODO """ function _binary_tree_structure( - tn::ITensorNetwork, outinds::Vector{<:Index}; maximally_unbalanced::Bool=false + tn::ITensorNetwork, outinds::Vector; maximally_unbalanced::Bool=false ) inds_tree_vector = _binary_tree_partition_inds( tn, outinds; maximally_unbalanced=maximally_unbalanced @@ -141,7 +137,7 @@ function _binary_tree_structure( end function _binary_tree_partition_inds( - tn::ITensorNetwork, outinds::Vector{<:Index}; maximally_unbalanced::Bool=false + tn::ITensorNetwork, outinds::Vector; maximally_unbalanced::Bool=false ) if length(outinds) == 1 return outinds @@ -164,7 +160,7 @@ function _nested_vector_to_directed_tree(inds_tree_vector::Vector) return inds_btree end treenode_to_v = Dict{Union{Vector,Index},Int}() - graph = DataGraph(NamedDiGraph(), Index) + graph = DataGraph(NamedDiGraph(); edge_data_eltype=Index) v = 1 for treenode in PostOrderDFS(inds_tree_vector) add_vertex!(graph, v) @@ -184,11 +180,9 @@ end """ Given a tn and outinds, returns a vector of indices representing MPS inds ordering. """ -function _mps_partition_inds_order( - tn::ITensorNetwork, outinds::Union{Nothing,Vector{<:Index}} -) +function _mps_partition_inds_order(tn::ITensorNetwork, outinds::Union{Nothing,Vector}) if outinds == nothing - outinds = noncommoninds(Vector{ITensor}(tn)...) 
+ outinds = flatten_siteinds(tn) end if length(outinds) == 1 return outinds @@ -258,7 +252,7 @@ Note: """ function _mincut_inds( tn_pair::Pair{<:ITensorNetwork,<:ITensorNetwork}, - out_to_maxweight_ind::Dict{Index,Index}, + out_to_maxweight_ind::Dict{<:Index,<:Index}, sourceinds_list::Vector{<:Vector{<:Index}}, ) function _mincut_value(tn, sinds, outinds) diff --git a/src/approx_itensornetwork/partition.jl b/src/contract_approx/partition.jl similarity index 73% rename from src/approx_itensornetwork/partition.jl rename to src/contract_approx/partition.jl index 7dd994f5..6a7ae702 100644 --- a/src/approx_itensornetwork/partition.jl +++ b/src/contract_approx/partition.jl @@ -1,28 +1,34 @@ -using DataGraphs: AbstractDataGraph, DataGraph, edge_data, vertex_data +using DataGraphs: AbstractDataGraph, DataGraph, edge_data, edge_data_eltype, vertex_data using Dictionaries: Dictionary using Graphs: AbstractGraph, add_edge!, has_edge, dst, edges, edgetype, src, vertices using ITensors: ITensor, noncommoninds using NamedGraphs: NamedGraph, subgraph +using SplitApplyCombine: flatten function _partition(g::AbstractGraph, subgraph_vertices) partitioned_graph = DataGraph( - NamedGraph(eachindex(subgraph_vertices)), - map(vs -> subgraph(g, vs), Dictionary(subgraph_vertices)), + NamedGraph(eachindex(subgraph_vertices)); + vertex_data_eltype=typeof(g), + edge_data_eltype=@NamedTuple{ + edges::Vector{edgetype(g)}, edge_data::Dictionary{edgetype(g),edge_data_eltype(g)} + } ) + for v in vertices(partitioned_graph) + partitioned_graph[v] = subgraph(g, subgraph_vertices[v]) + end for e in edges(g) s1 = findfirst_on_vertices(subgraph -> src(e) ∈ vertices(subgraph), partitioned_graph) s2 = findfirst_on_vertices(subgraph -> dst(e) ∈ vertices(subgraph), partitioned_graph) if (!has_edge(partitioned_graph, s1, s2) && s1 ≠ s2) add_edge!(partitioned_graph, s1, s2) - partitioned_graph[s1 => s2] = Dictionary( - [:edges, :edge_data], - [Vector{edgetype(g)}(), Dictionary{edgetype(g),edge_data_type(g)}()], + partitioned_graph[s1 => s2] = (; + edges=Vector{edgetype(g)}(), edge_data=Dictionary{edgetype(g),edge_data_eltype(g)}() ) end if has_edge(partitioned_graph, s1, s2) - push!(partitioned_graph[s1 => s2][:edges], e) + push!(partitioned_graph[s1 => s2].edges, e) if isassigned(g, e) - set!(partitioned_graph[s1 => s2][:edge_data], e, g[e]) + set!(partitioned_graph[s1 => s2].edge_data, e, g[e]) end end end @@ -86,20 +92,13 @@ end # return subgraphs(g, subgraph_vertices(g; npartitions, nvertices_per_partition, kwargs...)) # end -""" - TODO: do we want to make it a public function? -""" function _noncommoninds(partition::DataGraph) - networks = [Vector{ITensor}(partition[v]) for v in vertices(partition)] - network = vcat(networks...) - return noncommoninds(network...) + tn = mapreduce(v -> collect(eachtensor(partition[v])), vcat, vertices(partition)) + return unique(flatten_siteinds(ITensorNetwork(tn))) end # Util functions for partition function _commoninds(partition::DataGraph) - networks = [Vector{ITensor}(partition[v]) for v in vertices(partition)] - network = vcat(networks...) - outinds = noncommoninds(network...) 
- allinds = mapreduce(t -> [i for i in inds(t)], vcat, network) - return Vector(setdiff(allinds, outinds)) + tn = mapreduce(v -> collect(eachtensor(partition[v])), vcat, vertices(partition)) + return unique(flatten_linkinds(ITensorNetwork(tn))) end diff --git a/src/approx_itensornetwork/ttn_svd.jl b/src/contract_approx/ttn_svd.jl similarity index 55% rename from src/approx_itensornetwork/ttn_svd.jl rename to src/contract_approx/ttn_svd.jl index 59797c3e..25e0a0e6 100644 --- a/src/approx_itensornetwork/ttn_svd.jl +++ b/src/contract_approx/ttn_svd.jl @@ -1,4 +1,8 @@ -using IterTools: partition +using DataGraphs: DataGraph +using Graphs: add_vertex!, vertices +using LinearAlgebra: norm +using NamedGraphs.GraphsExtensions: vertextype + """ Approximate a `partition` into an output ITensorNetwork with the binary tree structure defined by `out_tree` by @@ -10,23 +14,18 @@ function _approx_itensornetwork_ttn_svd!( root=first(vertices(partition)), cutoff=1e-15, maxdim=10000, - contraction_sequence_alg, contraction_sequence_kwargs, ) - tn = ITensorNetwork() + tn = ITensorNetwork{vertextype(input_partition)}() for v in vertices(input_partition) add_vertex!(tn, v) tn[v] = _optcontract( - Vector{ITensor}(input_partition[v]); - contraction_sequence_alg=contraction_sequence_alg, - contraction_sequence_kwargs=contraction_sequence_kwargs, + collect(eachtensor(input_partition[v])); contraction_sequence_kwargs ) end - truncate_ttn = truncate(ttn(tn); cutoff=cutoff, maxdim=maxdim, root_vertex=root) + truncate_ttn = truncate(ttn(tn); cutoff, maxdim, root_vertex=root) out_tn = ITensorNetwork(truncate_ttn) - root_tensor = out_tn[root] - root_norm = norm(root_tensor) - root_tensor /= root_norm - out_tn[root] = root_tensor + root_norm = norm(out_tn[root]) + out_tn[root] /= root_norm return out_tn, log(root_norm) end diff --git a/src/approx_itensornetwork/utils.jl b/src/contract_approx/utils.jl similarity index 57% rename from src/approx_itensornetwork/utils.jl rename to src/contract_approx/utils.jl index ea0a4027..ecebb85d 100644 --- a/src/approx_itensornetwork/utils.jl +++ b/src/contract_approx/utils.jl @@ -1,5 +1,7 @@ -using NamedGraphs: parent_vertex -using Graphs: dfs_tree +using NamedGraphs.GraphsExtensions: leaf_vertices, parent_vertex +using Graphs: dfs_tree, rem_vertex!, vertices +using ITensors: ITensor + """ For a given ITensorNetwork `tn` and a `root` vertex, remove leaf vertices in the directed tree with root `root` without changing the tensor represented by tn. @@ -7,18 +9,13 @@ In particular, the tensor of each leaf vertex is contracted with the tensor of i to keep the tensor unchanged. 
""" function _rem_leaf_vertices!( - tn::ITensorNetwork; - root=first(vertices(tn)), - contraction_sequence_alg, - contraction_sequence_kwargs, + tn::ITensorNetwork; root=first(vertices(tn)), contraction_sequence_kwargs ) dfs_t = dfs_tree(tn, root) leaves = leaf_vertices(dfs_t) parents = [parent_vertex(dfs_t, leaf) for leaf in leaves] for (l, p) in zip(leaves, parents) - tn[p] = _optcontract( - [tn[p], tn[l]]; contraction_sequence_alg, contraction_sequence_kwargs - ) + tn[p] = _optcontract([tn[p], tn[l]]; contraction_sequence_kwargs) rem_vertex!(tn, l) end end @@ -26,16 +23,12 @@ end """ Contract of a vector of tensors, `network`, with a contraction sequence generated via sa_bipartite """ -function _optcontract( - network::Vector; contraction_sequence_alg="optimal", contraction_sequence_kwargs=(;) -) +function _optcontract(network::Vector; contraction_sequence_kwargs=(;)) if length(network) == 0 - return ITensor(1.0) + return ITensor(1) end @assert network isa Vector{ITensor} - seq = contraction_sequence( - network; alg=contraction_sequence_alg, contraction_sequence_kwargs... - ) - output = contract(network; sequence=seq) + sequence = contraction_sequence(network; contraction_sequence_kwargs...) + output = contract(network; sequence) return output end diff --git a/src/contraction_sequences.jl b/src/contraction_sequences.jl index aca2254b..3496340d 100644 --- a/src/contraction_sequences.jl +++ b/src/contraction_sequences.jl @@ -2,16 +2,19 @@ using Graphs: vertices using ITensors: ITensor, contract using ITensors.ContractionSequenceOptimization: deepmap, optimal_contraction_sequence using ITensors.NDTensors: Algorithm, @Algorithm_str -using NamedGraphs: Key +using NamedGraphs: parent_vertex_to_vertex +using NamedGraphs.Keys: Key function contraction_sequence(tn::Vector{ITensor}; alg="optimal", kwargs...) return contraction_sequence(Algorithm(alg), tn; kwargs...) end function contraction_sequence(tn::AbstractITensorNetwork; kwargs...) - seq_linear_index = contraction_sequence(Vector{ITensor}(tn); kwargs...) - # TODO: Use Functors.fmap? - return deepmap(n -> Key(vertices(tn)[n]), seq_linear_index) + # TODO: Use `token_vertex` and/or `token_vertices` here. + ts = map(pv -> tn[parent_vertex_to_vertex(tn, pv)], 1:nv(tn)) + seq_linear_index = contraction_sequence(ts; kwargs...) + # TODO: Use `Functors.fmap` or `StructWalk`? + return deepmap(n -> Key(parent_vertex_to_vertex(tn, n)), seq_linear_index) end function contraction_sequence(::Algorithm"optimal", tn::Vector{ITensor}) diff --git a/src/contraction_tree_to_graph.jl b/src/contraction_tree_to_graph.jl index b7941dd4..d026376e 100644 --- a/src/contraction_tree_to_graph.jl +++ b/src/contraction_tree_to_graph.jl @@ -1,4 +1,8 @@ -using Graphs.SimpleGraphs: rem_vertex! +using AbstractTrees: Leaves, PostOrderDFS +using Graphs: add_vertex!, dst, edges, rem_vertex!, src +using NamedGraphs: NamedDiGraph, NamedGraph +using NamedGraphs.GraphsExtensions: is_leaf_edge, root_vertex + """ Take a contraction sequence and return a directed graph. 
""" @@ -37,7 +41,7 @@ function contraction_sequence_to_graph(contract_sequence) for e in edges(direct_g) add_edge!(g, e) end - root = _root(direct_g) + root = root_vertex(direct_g) c1, c2 = child_vertices(direct_g, root) rem_vertex!(g, root) add_edge!(g, c1 => c2) diff --git a/src/edge_sequences.jl b/src/edge_sequences.jl index 24b71e07..9dab9fff 100644 --- a/src/edge_sequences.jl +++ b/src/edge_sequences.jl @@ -1,6 +1,10 @@ -using NamedGraphs: partitioned_graph -using Graphs: connected_components -using Graphs: IsDirected +using Graphs: IsDirected, connected_components, edges, edgetype +using ITensors.NDTensors: Algorithm, @Algorithm_str +using NamedGraphs: NamedGraphs +using NamedGraphs.GraphsExtensions: GraphsExtensions, forest_cover, undirected_graph +using NamedGraphs.PartitionedGraphs: PartitionEdge, PartitionedGraph, partitioned_graph +using SimpleTraits: SimpleTraits, Not, @traitfn + default_edge_sequence_alg() = "forest_cover" function default_edge_sequence(pg::PartitionedGraph) return PartitionEdge.(edge_sequence(partitioned_graph(pg))) @@ -21,9 +25,11 @@ end end @traitfn function edge_sequence( - ::Algorithm"forest_cover", g::::(!IsDirected); root_vertex=NamedGraphs.default_root_vertex + ::Algorithm"forest_cover", + g::::(!IsDirected); + root_vertex=GraphsExtensions.default_root_vertex, ) - forests = NamedGraphs.forest_cover(g) + forests = forest_cover(g) edges = edgetype(g)[] for forest in forests trees = [forest[vs] for vs in connected_components(forest)] @@ -32,7 +38,6 @@ end push!(edges, vcat(tree_edges, reverse(reverse.(tree_edges)))...) end end - return edges end diff --git a/src/environment.jl b/src/environment.jl index 262a7c23..37249cb3 100644 --- a/src/environment.jl +++ b/src/environment.jl @@ -10,15 +10,9 @@ function environment( end function environment( - ::Algorithm"exact", - ψ::AbstractITensorNetwork, - verts::Vector; - contraction_sequence_alg="optimal", - kwargs..., + ::Algorithm"exact", ψ::AbstractITensorNetwork, verts::Vector; kwargs... ) - ψ_reduced = Vector{ITensor}(subgraph(ψ, setdiff(vertices(ψ), verts))) - sequence = contraction_sequence(ψ_reduced; alg=contraction_sequence_alg) - return ITensor[contract(ψ_reduced; sequence, kwargs...)] + return [contract(subgraph(ψ, setdiff(vertices(ψ), verts)); kwargs...)] end function environment( diff --git a/src/formnetworks/abstractformnetwork.jl b/src/formnetworks/abstractformnetwork.jl index 17f647eb..ad3953a8 100644 --- a/src/formnetworks/abstractformnetwork.jl +++ b/src/formnetworks/abstractformnetwork.jl @@ -1,4 +1,6 @@ using Graphs: induced_subgraph +using NamedGraphs.SimilarType: SimilarType + default_bra_vertex_suffix() = "bra" default_ket_vertex_suffix() = "ket" default_operator_vertex_suffix() = "operator" @@ -7,12 +9,17 @@ abstract type AbstractFormNetwork{V} <: AbstractITensorNetwork{V} end #Needed for interface dual_index_map(f::AbstractFormNetwork) = not_implemented() +# TODO: Use `NamedGraphs.GraphsExtensions.parent_graph`. 
tensornetwork(f::AbstractFormNetwork) = not_implemented() Base.copy(f::AbstractFormNetwork) = not_implemented() operator_vertex_suffix(f::AbstractFormNetwork) = not_implemented() bra_vertex_suffix(f::AbstractFormNetwork) = not_implemented() ket_vertex_suffix(f::AbstractFormNetwork) = not_implemented() +function SimilarType.similar_type(f::AbstractFormNetwork) + return typeof(tensornetwork(f)) +end + function operator_vertices(f::AbstractFormNetwork) return filter(v -> last(v) == operator_vertex_suffix(f), vertices(f)) end diff --git a/src/formnetworks/bilinearformnetwork.jl b/src/formnetworks/bilinearformnetwork.jl index 14d21114..306cb4a1 100644 --- a/src/formnetworks/bilinearformnetwork.jl +++ b/src/formnetworks/bilinearformnetwork.jl @@ -1,3 +1,5 @@ +using ITensors: ITensor, Op, prime, sim + default_dual_site_index_map = prime default_dual_link_index_map = sim @@ -38,8 +40,11 @@ end operator_vertex_suffix(blf::BilinearFormNetwork) = blf.operator_vertex_suffix bra_vertex_suffix(blf::BilinearFormNetwork) = blf.bra_vertex_suffix ket_vertex_suffix(blf::BilinearFormNetwork) = blf.ket_vertex_suffix +# TODO: Use `NamedGraphs.GraphsExtensions.parent_graph`. tensornetwork(blf::BilinearFormNetwork) = blf.tensornetwork +# TODO: Use `NamedGraphs.GraphsExtensions.parent_graph_type`. data_graph_type(::Type{<:BilinearFormNetwork}) = data_graph_type(tensornetwork(blf)) +# TODO: Use `NamedGraphs.GraphsExtensions.parent_graph`. data_graph(blf::BilinearFormNetwork) = data_graph(tensornetwork(blf)) function Base.copy(blf::BilinearFormNetwork) @@ -59,6 +64,7 @@ function BilinearFormNetwork( ) @assert issetequal(flatten_siteinds(bra), flatten_siteinds(ket)) operator_inds = union_all_inds(siteinds(ket), dual_site_index_map(siteinds(ket))) + # TODO: Define and use `identity_network` here. O = ITensorNetwork(Op("I"), operator_inds) return BilinearFormNetwork(O, bra, ket; dual_site_index_map, kwargs...) 
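The keyword-based `DataGraph` constructor used in `default_bond_tensors` (and in `_partition` above) replaces the old positional type parameters; a minimal sketch, assuming DataGraphs 0.2 and an illustrative data type:

```julia
# Sketch of the DataGraphs 0.2 keyword constructor:
using DataGraphs: DataGraph
using NamedGraphs.NamedGraphGenerators: named_grid
g = named_grid((2, 2))
dg = DataGraph(g; vertex_data_eltype=Nothing, edge_data_eltype=String)
dg[(1, 1) => (2, 1)] = "bond data"  # edge data is now typed as String
```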
diff --git a/src/indsnetwork.jl b/src/indsnetwork.jl
index 311f6494..befc5cdc 100644
--- a/src/indsnetwork.jl
+++ b/src/indsnetwork.jl
@@ -1,11 +1,13 @@
 using DataGraphs: DataGraphs, DataGraph, IsUnderlyingGraph, map_data, vertex_data
-using Dictionaries: AbstractDictionary, Indices
+using Dictionaries: AbstractDictionary, Dictionary, Indices
 using Graphs: Graphs
 using Graphs.SimpleGraphs: AbstractSimpleGraph
-using ITensors: Index, dag
-using ITensors.ITensorVisualizationCore: ITensorVisualizationCore, visualize
-using NamedGraphs:
-  NamedGraphs, AbstractNamedGraph, NamedEdge, NamedGraph, named_path_graph, vertextype
+using ITensors: Index, QN, dag
+using .ITensorsExtensions: ITensorsExtensions, indtype
+using NamedGraphs: NamedGraphs, AbstractNamedGraph, NamedEdge, NamedGraph
+using NamedGraphs.GraphsExtensions: vertextype
+using NamedGraphs.NamedGraphGenerators: named_path_graph
+using SimpleTraits: SimpleTraits, Not, @traitfn
 
 struct IndsNetwork{V,I} <: AbstractIndsNetwork{V,I}
   data_graph::DataGraph{V,Vector{I},Vector{I},NamedGraph{V},NamedEdge{V}}
@@ -13,8 +15,8 @@ struct IndsNetwork{V,I} <: AbstractIndsNetwork{V,I}
     return new{V,I}(g)
   end
 end
-indtype(inds_network::IndsNetwork) = indtype(typeof(inds_network))
-indtype(::Type{<:IndsNetwork{V,I}}) where {V,I} = I
+ITensorsExtensions.indtype(inds_network::IndsNetwork) = indtype(typeof(inds_network))
+ITensorsExtensions.indtype(::Type{<:IndsNetwork{V,I}}) where {V,I} = I
 data_graph(is::IndsNetwork) = is.data_graph
 DataGraphs.underlying_graph(is::IndsNetwork) = underlying_graph(data_graph(is))
 NamedGraphs.vertextype(::Type{<:IndsNetwork{V}}) where {V} = V
@@ -76,7 +78,7 @@ function IndsNetwork{V,I}(
   link_space::Dictionary{<:Any,<:Vector{<:Index}},
   site_space::Dictionary{<:Any,<:Vector{<:Index}},
 ) where {V,I}
-  dg = DataGraph{V,Vector{I},Vector{I}}(g)
+  dg = DataGraph{V}(g; vertex_data_eltype=Vector{I}, edge_data_eltype=Vector{I})
   for e in keys(link_space)
     dg[e] = link_space[e]
   end
@@ -109,29 +111,6 @@ function path_indsnetwork(external_inds::Vector{<:Index})
   return path_indsnetwork(map(i -> [i], external_inds))
 end
 
-# TODO: Replace with a trait of the same name.
-const IsIndexSpace = Union{<:Integer,Vector{<:Pair{QN,<:Integer}}}
-
-# Infer the `Index` type of an `IndsNetwork` from the
-# spaces that get input.
-indtype(link_space::Nothing, site_space::Nothing) = Index
-indtype(link_space::Nothing, site_space) = indtype(site_space)
-indtype(link_space, site_space::Nothing) = indtype(link_space)
-indtype(link_space, site_space) = promote_type(indtype(link_space), indtype(site_space))
-
-# Default to type space
-indtype(space) = _indtype(typeof(space))
-
-# Base case
-# Use `_indtype` to avoid recursion overflow
-_indtype(T::Type{<:Index}) = T
-_indtype(T::Type{<:IsIndexSpace}) = Index{T}
-_indtype(::Type{Nothing}) = Index
-
-# Containers
-_indtype(T::Type{<:AbstractDictionary}) = _indtype(eltype(T))
-_indtype(T::Type{<:AbstractVector}) = _indtype(eltype(T))
-
 @traitfn function default_link_space(V::Type, g::::IsUnderlyingGraph)
   # TODO: Convert `g` to vertex type `V`
   E = edgetype(g)
@@ -319,6 +298,10 @@ end
 # Visualization
 #
 
+# TODO: Move to an `ITensorNetworksVisualizationInterfaceExt`
+# package extension (and define a `VisualizationInterface` package
+# based on `ITensorVisualizationCore`.).
+using ITensors.ITensorVisualizationCore: ITensorVisualizationCore, visualize
 function ITensorVisualizationCore.visualize(is::IndsNetwork, args...; kwargs...)
   return visualize(ITensorNetwork(is), args...; kwargs...)
 end
diff --git a/src/itensornetwork.jl b/src/itensornetwork.jl
index a2d4af3f..6c3cf060 100644
--- a/src/itensornetwork.jl
+++ b/src/itensornetwork.jl
@@ -1,6 +1,7 @@
 using DataGraphs: DataGraphs, DataGraph
 using Dictionaries: Indices, dictionary
 using ITensors: ITensors, ITensor, op, state
+using .ITensorsExtensions: trivial_space
 using NamedGraphs: NamedGraphs, NamedEdge, NamedGraph, vertextype
 
 struct Private end
@@ -30,7 +31,9 @@ end
 function ITensorNetwork{V}() where {V}
   # TODO: Is there a better way to write this?
   # Try using `convert_vertextype`.
-  return _ITensorNetwork(data_graph_type(ITensorNetwork{V})())
+  new_data_graph_type = data_graph_type(ITensorNetwork{V})
+  new_underlying_graph_type = underlying_graph_type(new_data_graph_type)
+  return _ITensorNetwork(new_data_graph_type(new_underlying_graph_type()))
 end
 function ITensorNetwork{V}(tn::ITensorNetwork) where {V}
   # TODO: Is there a better way to write this?
@@ -237,6 +240,10 @@ end
 
 ITensorNetwork(itns::Vector{ITensorNetwork}) = reduce(⊗, itns)
 
-function Base.Vector{ITensor}(ψ::ITensorNetwork)
-  return ITensor[ψ[v] for v in vertices(ψ)]
+# TODO: Use `vertex_data` here?
+function eachtensor(ψ::ITensorNetwork)
+  # This type declaration is needed to narrow
+  # the element type of the resulting `Dictionary`;
+  # raise an issue with `Dictionaries.jl`.
+  return map(v -> ψ[v]::ITensor, vertices(ψ))
 end
diff --git a/src/lib/BaseExtensions/src/BaseExtensions.jl b/src/lib/BaseExtensions/src/BaseExtensions.jl
new file mode 100644
index 00000000..49ba9110
--- /dev/null
+++ b/src/lib/BaseExtensions/src/BaseExtensions.jl
@@ -0,0 +1,8 @@
+module BaseExtensions
+# Convert to real if possible
+maybe_real(x::Real) = x
+maybe_real(x::Complex) = iszero(imag(x)) ? real(x) : x
+
+to_tuple(x) = (x,)
+to_tuple(x::Tuple) = x
+end
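Behavior of the two relocated helpers, for orientation (a sketch; it assumes the internal `BaseExtensions` submodule is reachable from the package namespace):

```julia
# Sketch: BaseExtensions helper semantics.
using ITensorNetworks.BaseExtensions: maybe_real, to_tuple
maybe_real(2.0 + 0.0im)  # 2.0 — a numerically zero imaginary part is dropped
maybe_real(2.0 + 1.0im)  # 2.0 + 1.0im — left as Complex
to_tuple(1)              # (1,)
to_tuple((1, 2))         # (1, 2) — tuples pass through unchanged
```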
diff --git a/src/lib/ITensorsExtensions/src/ITensorsExtensions.jl b/src/lib/ITensorsExtensions/src/ITensorsExtensions.jl
new file mode 100644
index 00000000..1346b65a
--- /dev/null
+++ b/src/lib/ITensorsExtensions/src/ITensorsExtensions.jl
@@ -0,0 +1,5 @@
+module ITensorsExtensions
+include("itensor.jl")
+include("itensor_more.jl")
+include("opsum.jl")
+end
diff --git a/src/ITensorsExtensions/ITensorsExtensions.jl b/src/lib/ITensorsExtensions/src/itensor.jl
similarity index 98%
rename from src/ITensorsExtensions/ITensorsExtensions.jl
rename to src/lib/ITensorsExtensions/src/itensor.jl
index 5b58e663..94fc32b9 100644
--- a/src/ITensorsExtensions/ITensorsExtensions.jl
+++ b/src/lib/ITensorsExtensions/src/itensor.jl
@@ -1,4 +1,3 @@
-module ITensorsExtensions
 using LinearAlgebra: LinearAlgebra, eigen, pinv
 using ITensors:
   ITensor,
@@ -88,5 +87,3 @@ function diagblocks(D::Tensor)
 end
 
 diagblocks(it::ITensor) = itensor(diagblocks(tensor(it)))
-
-end
diff --git a/src/itensors.jl b/src/lib/ITensorsExtensions/src/itensor_more.jl
similarity index 74%
rename from src/itensors.jl
rename to src/lib/ITensorsExtensions/src/itensor_more.jl
index b47e4b0f..1aa5bb5a 100644
--- a/src/itensors.jl
+++ b/src/lib/ITensorsExtensions/src/itensor_more.jl
@@ -1,8 +1,7 @@
-using ITensors: filterinds
-using NamedGraphs: Key
-using ITensors: ITensors, Index, ITensor, QN, inds, op, replaceinds, uniqueinds
+using NamedGraphs.Keys: Key
+using ITensors: ITensors, Index, ITensor, QN, filterinds, inds, op, replaceinds, uniqueinds
 using ITensors.NDTensors: NDTensors
-using Dictionaries: Dictionary
+using Dictionaries: AbstractDictionary, Dictionary
 
 # Tensor sum: `A ⊞ B = A ⊗ Iᴮ + Iᴬ ⊗ B`
 # https://github.com/JuliaLang/julia/issues/13333#issuecomment-143825995
@@ -26,6 +25,29 @@ end
 # TODO: Move patch to `ITensors.jl`.
 ITensors._contract(As, index::Key) = As[index]
 
+# TODO: Replace with a trait of the same name.
+const IsIndexSpace = Union{<:Integer,Vector{<:Pair{QN,<:Integer}}}
+
+# Infer the `Index` type of an `IndsNetwork` from the
+# spaces that get input.
+indtype(link_space::Nothing, site_space::Nothing) = Index
+indtype(link_space::Nothing, site_space) = indtype(site_space)
+indtype(link_space, site_space::Nothing) = indtype(link_space)
+indtype(link_space, site_space) = promote_type(indtype(link_space), indtype(site_space))
+
+# Default to type space
+indtype(space) = _indtype(typeof(space))
+
+# Base case
+# Use `_indtype` to avoid recursion overflow
+_indtype(T::Type{<:Index}) = T
+_indtype(T::Type{<:IsIndexSpace}) = Index{T}
+_indtype(::Type{Nothing}) = Index
+
+# Containers
+_indtype(T::Type{<:AbstractDictionary}) = _indtype(eltype(T))
+_indtype(T::Type{<:AbstractVector}) = _indtype(eltype(T))
+
 indtype(a::ITensor) = promote_indtype(typeof.(inds(a))...)
 
 spacetype(::Index{T}) where {T} = T
@@ -65,6 +87,8 @@ function promote_indtype_rule(type1::Type{<:Index}, type2::Type{<:Index})
   return Index{promote_spacetype_rule(spacetype(type1), spacetype(type2))}
 end
 
+function promote_indtypeof end
+
 trivial_space(x) = trivial_space(promote_indtypeof(x))
 trivial_space(x::Type) = trivial_space(promote_indtype(x))
diff --git a/src/lib/ITensorsExtensions/src/opsum.jl b/src/lib/ITensorsExtensions/src/opsum.jl
new file mode 100644
index 00000000..783ed4fb
--- /dev/null
+++ b/src/lib/ITensorsExtensions/src/opsum.jl
@@ -0,0 +1,39 @@
+using ..BaseExtensions: maybe_real, to_tuple
+using Graphs: dst, edges, src
+using ITensors: ITensors
+using ITensors.LazyApply: Applied, Prod, Scaled, Sum
+using ITensors.Ops: Ops, Op
+using SplitApplyCombine: group
+
+# TODO: Rename this `replace_sites`?
+# TODO: Use `fmap`, `deepmap`, `treemap`?
+function replace_vertices(f, ∑o::Sum)
+  return Sum(map(oᵢ -> replace_vertices(f, oᵢ), Ops.terms(∑o)))
+end
+
+function replace_vertices(f, ∏o::Prod)
+  return Prod(map(oᵢ -> replace_vertices(f, oᵢ), Ops.terms(∏o)))
+end
+
+function replace_vertices(f, o::Scaled)
+  return maybe_real(Ops.coefficient(o)) * replace_vertices(f, Ops.argument(o))
+end
+
+set_sites(o::Op, sites) = Op(Ops.which_op(o), sites...; Ops.params(o)...)
+
+function replace_vertices(f, o::Op)
+  return set_sites(o, f.(Ops.sites(o)))
+end
+
+## function replace_vertices(o::Union{Op,Applied}, vertex_map)
+##   return replace_vertices(v -> get(vertex_map, v, v), o)
+## end
+
+function group_terms(ℋ::Sum, g)
+  grouped_terms = group(ITensors.terms(ℋ)) do t
+    findfirst(edges(g)) do e
+      to_tuple.(ITensors.sites(t)) ⊆ [src(e), dst(e)]
+    end
+  end
+  return Sum(collect(sum.(grouped_terms)))
+end
diff --git a/src/ModelHamiltonians/ModelHamiltonians.jl b/src/lib/ModelHamiltonians/src/ModelHamiltonians.jl
similarity index 100%
rename from src/ModelHamiltonians/ModelHamiltonians.jl
rename to src/lib/ModelHamiltonians/src/ModelHamiltonians.jl
diff --git a/src/ModelNetworks/ModelNetworks.jl b/src/lib/ModelNetworks/src/ModelNetworks.jl
similarity index 100%
rename from src/ModelNetworks/ModelNetworks.jl
rename to src/lib/ModelNetworks/src/ModelNetworks.jl
diff --git a/src/observers.jl b/src/observers.jl
index 94359041..4b668a71 100644
--- a/src/observers.jl
+++ b/src/observers.jl
@@ -1,3 +1,4 @@
+# TODO: Move to `ITensorNetworksObserversExt`.
 using Observers: Observers
 
 """
diff --git a/src/opsum.jl b/src/opsum.jl
index c86f6f37..8ea5cd13 100644
--- a/src/opsum.jl
+++ b/src/opsum.jl
@@ -1,38 +1,8 @@
+using .BaseExtensions: maybe_real
+using ITensors: ITensor, hascommoninds, op
 using ITensors.LazyApply: Applied, Prod, Scaled, Sum
 using ITensors.Ops: Ops, Op
-
-# TODO: Rename this `replace_sites`?
-# TODO: Use `fmap`, `deepmap`, `treemap`?
-function replace_vertices(f, ∑o::Sum)
-  return Sum(map(oᵢ -> replace_vertices(f, oᵢ), Ops.terms(∑o)))
-end
-
-function replace_vertices(f, ∏o::Prod)
-  return Prod(map(oᵢ -> replace_vertices(f, oᵢ), Ops.terms(∏o)))
-end
-
-function replace_vertices(f, o::Scaled)
-  return maybe_real(Ops.coefficient(o)) * replace_vertices(f, Ops.argument(o))
-end
-
-set_sites(o::Op, sites) = Op(Ops.which_op(o), sites...; Ops.params(o)...)
-
-function replace_vertices(f, o::Op)
-  return set_sites(o, f.(Ops.sites(o)))
-end
-
-function replace_vertices(o::Union{Op,Applied}, vertex_map)
-  return replace_vertices(v -> get(vertex_map, v, v), o)
-end
-
-function group_terms(ℋ::Sum, g)
-  grouped_terms = group(ITensors.terms(ℋ)) do t
-    findfirst(edges(g)) do e
-      to_tuple.(ITensors.sites(t)) ⊆ [src(e), dst(e)]
-    end
-  end
-  return Sum(collect(sum.(grouped_terms)))
-end
+using .ITensorsExtensions: tensor_sum
 
 function ITensors.ITensor(o::Op, s::IndsNetwork)
   s⃗ = [only(s[nᵢ]) for nᵢ in Ops.sites(o)]
diff --git a/src/partitioneditensornetwork.jl b/src/partitioneditensornetwork.jl
index b23a9af2..e249c973 100644
--- a/src/partitioneditensornetwork.jl
+++ b/src/partitioneditensornetwork.jl
@@ -1,6 +1,8 @@
+using Graphs: dst, src
 using ITensors: commoninds
 using ITensors.ITensorMPS: ITensorMPS
-using NamedGraphs: PartitionedGraph, PartitionEdge, subgraph
+using NamedGraphs.GraphsExtensions: subgraph
+using NamedGraphs.PartitionedGraphs: PartitionedGraph, PartitionEdge
 
 function ITensorMPS.linkinds(pitn::PartitionedGraph, edge::PartitionEdge)
   src_e_itn = subgraph(pitn, src(edge))
diff --git a/src/sitetype.jl b/src/sitetype.jl
index 8d046053..407fa2e4 100644
--- a/src/sitetype.jl
+++ b/src/sitetype.jl
@@ -1,13 +1,14 @@
-using ITensors: siteind
+using Dictionaries: Dictionary
+using Graphs: AbstractGraph, nv, vertices
+using ITensors: ITensors, Index, siteind, siteinds
+
 function ITensors.siteind(sitetype::String, v::Tuple; kwargs...)
-  return addtags(siteind(sitetype; kwargs...), ITensorNetworks.vertex_tag(v))
+  return addtags(siteind(sitetype; kwargs...), vertex_tag(v))
 end
 
 # naming collision of ITensors.addtags and addtags keyword in siteind system
 function ITensors.siteind(d::Integer, v; addtags="", kwargs...)
-  return ITensors.addtags(
-    Index(d; tags="Site, $addtags", kwargs...), ITensorNetworks.vertex_tag(v)
-  )
+  return ITensors.addtags(Index(d; tags="Site, $addtags", kwargs...), vertex_tag(v))
 end
 
 function ITensors.siteinds(sitetypes::AbstractDictionary, g::AbstractGraph; kwargs...)
diff --git a/src/solvers/alternating_update/alternating_update.jl b/src/solvers/alternating_update/alternating_update.jl
index 8fae9532..3b0f6b77 100644
--- a/src/solvers/alternating_update/alternating_update.jl
+++ b/src/solvers/alternating_update/alternating_update.jl
@@ -1,5 +1,6 @@
 using ITensors: state
 using ITensors.ITensorMPS: linkind
+using NamedGraphs.GraphsExtensions: GraphsExtensions
 using Observers: Observers
 
 function alternating_update(
@@ -13,7 +14,7 @@ function alternating_update(
   sweep_printer=nothing,
   (sweep_observer!)=nothing,
   (region_observer!)=nothing,
-  root_vertex=default_root_vertex(init_state),
+  root_vertex=GraphsExtensions.default_root_vertex(init_state),
   extracter_kwargs=(;),
   extracter=default_extracter(),
   updater_kwargs=(;),
diff --git a/src/solvers/sweep_plans/sweep_plans.jl b/src/solvers/sweep_plans/sweep_plans.jl
index 208f9bce..69221995 100644
--- a/src/solvers/sweep_plans/sweep_plans.jl
+++ b/src/solvers/sweep_plans/sweep_plans.jl
@@ -1,3 +1,6 @@
+using Graphs: AbstractEdge, dst, src
+using NamedGraphs.GraphsExtensions: GraphsExtensions
+
 direction(step_number) = isodd(step_number) ? Base.Forward : Base.Reverse
 
 function overlap(edge_a::AbstractEdge, edge_b::AbstractEdge)
@@ -58,7 +61,7 @@ end
 function forward_sweep(
   dir::Base.ForwardOrdering,
   graph::AbstractGraph;
-  root_vertex=default_root_vertex(graph),
+  root_vertex=GraphsExtensions.default_root_vertex(graph),
   region_kwargs,
   reverse_kwargs=region_kwargs,
   reverse_step=false,
@@ -141,7 +144,10 @@ function default_sweep_plans(
 end
 
 function default_sweep_plan(
-  graph::AbstractGraph; root_vertex=default_root_vertex(graph), region_kwargs, nsites::Int
+  graph::AbstractGraph;
+  root_vertex=GraphsExtensions.default_root_vertex(graph),
+  region_kwargs,
+  nsites::Int,
 )
   return vcat(
     [
@@ -158,7 +164,7 @@ end
 
 function tdvp_sweep_plan(
   graph::AbstractGraph;
-  root_vertex=default_root_vertex(graph),
+  root_vertex=GraphsExtensions.default_root_vertex(graph),
   region_kwargs,
   reverse_step=true,
   order::Int,
diff --git a/src/solvers/tdvp.jl b/src/solvers/tdvp.jl
index 1b70015e..7a58fe1b 100644
--- a/src/solvers/tdvp.jl
+++ b/src/solvers/tdvp.jl
@@ -1,3 +1,5 @@
+using NamedGraphs.GraphsExtensions: GraphsExtensions
+
 #ToDo: Cleanup _compute_nsweeps, maybe restrict flexibility to simplify code
 function _compute_nsweeps(nsweeps::Int, t::Number, time_step::Number)
   return error("Cannot specify both nsweeps and time_step in tdvp")
@@ -101,7 +103,7 @@ function tdvp(
   sweep_printer=nothing,
   (sweep_observer!)=nothing,
   (region_observer!)=nothing,
-  root_vertex=default_root_vertex(init_state),
+  root_vertex=GraphsExtensions.default_root_vertex(init_state),
   reverse_step=true,
   extracter_kwargs=(;),
   extracter=default_extracter(), # ToDo: extracter could be inside extracter_kwargs, at the cost of having to extract it in region_update
diff --git a/src/treetensornetworks/abstracttreetensornetwork.jl b/src/treetensornetworks/abstracttreetensornetwork.jl
index 33146a70..8c54bddb 100644
--- a/src/treetensornetworks/abstracttreetensornetwork.jl
+++ b/src/treetensornetworks/abstracttreetensornetwork.jl
@@ -1,5 +1,6 @@
 using Graphs: has_vertex
-using NamedGraphs: edge_path, leaf_vertices, post_order_dfs_edges, post_order_dfs_vertices
+using NamedGraphs.GraphsExtensions:
+  GraphsExtensions, edge_path, leaf_vertices, post_order_dfs_edges, post_order_dfs_vertices
 using IsApprox: IsApprox, Approx
 using ITensors: @Algorithm_str, directsum, hasinds, permute, plev
 using ITensors.ITensorMPS: linkind, loginner, lognorm, orthogonalize
@@ -20,11 +21,6 @@ end
 ITensorNetwork(tn::AbstractTTN) = error("Not implemented")
 ortho_region(tn::AbstractTTN) = error("Not implemented")
 
-function default_root_vertex(gs::AbstractGraph...)
-  # @assert all(is_tree.(gs))
-  return first(leaf_vertices(gs[end]))
-end
-
 #
 # Orthogonality center
 #
@@ -50,7 +46,7 @@ function ITensorMPS.orthogonalize(tn::AbstractTTN, ortho_center; kwargs...)
   for e in edge_list
     tn = orthogonalize(tn, e)
   end
-  return set_ortho_region(tn, [ortho_center])
+  return set_ortho_region(tn, typeof(ortho_region(tn))([ortho_center]))
 end
 
 # For ambiguity error
@@ -63,12 +59,14 @@ end
 # Truncation
 #
 
-function Base.truncate(tn::AbstractTTN; root_vertex=default_root_vertex(tn), kwargs...)
+function Base.truncate(
+  tn::AbstractTTN; root_vertex=GraphsExtensions.default_root_vertex(tn), kwargs...
+)
   for e in post_order_dfs_edges(tn, root_vertex)
     # always orthogonalize towards source first to make truncations controlled
     tn = orthogonalize(tn, src(e))
     tn = truncate(tn, e; kwargs...)
-    tn = set_ortho_region(tn, [dst(e)])
+    tn = set_ortho_region(tn, typeof(ortho_region(tn))([dst(e)]))
   end
   return tn
 end
@@ -83,7 +81,9 @@ end
 #
 
 # TODO: decide on contraction order: reverse dfs vertices or forward dfs edges?
-function NDTensors.contract(tn::AbstractTTN, root_vertex=default_root_vertex(tn); kwargs...)
+function NDTensors.contract(
+  tn::AbstractTTN, root_vertex=GraphsExtensions.default_root_vertex(tn); kwargs...
+)
   tn = copy(tn)
   # reverse post order vertices
   traversal_order = reverse(post_order_dfs_vertices(tn, root_vertex))
@@ -97,7 +97,7 @@ function NDTensors.contract(tn::AbstractTTN, root_vertex=default_root_vertex(tn)
 end
 
 function ITensors.inner(
-  x::AbstractTTN, y::AbstractTTN; root_vertex=default_root_vertex(x, y)
+  x::AbstractTTN, y::AbstractTTN; root_vertex=GraphsExtensions.default_root_vertex(x)
 )
   xᴴ = sim(dag(x); sites=[])
   y = sim(y; sites=[])
@@ -185,7 +185,7 @@ end
 
 # TODO: stick with this traversal or find optimal contraction sequence?
 function ITensorMPS.loginner(
-  tn1::AbstractTTN, tn2::AbstractTTN; root_vertex=default_root_vertex(tn1, tn2)
+  tn1::AbstractTTN, tn2::AbstractTTN; root_vertex=GraphsExtensions.default_root_vertex(tn1)
 )
   N = nv(tn1)
   if nv(tn2) != N
@@ -227,14 +227,16 @@ function Base.:+(
   ::Algorithm"densitymatrix",
   tns::AbstractTTN...;
   cutoff=1e-15,
-  root_vertex=default_root_vertex(tns...),
+  root_vertex=GraphsExtensions.default_root_vertex(first(tns)),
   kwargs...,
 )
   return error("Not implemented (yet) for trees.")
 end
 
 function Base.:+(
-  ::Algorithm"directsum", tns::AbstractTTN...; root_vertex=default_root_vertex(tns...)
+  ::Algorithm"directsum",
+  tns::AbstractTTN...;
+  root_vertex=GraphsExtensions.default_root_vertex(first(tns)),
 )
   @assert all(tn -> nv(first(tns)) == nv(tn), tns)
 
@@ -301,7 +303,10 @@ end
 
 # TODO: implement using multi-graph disjoint union
 function ITensors.inner(
-  y::AbstractTTN, A::AbstractTTN, x::AbstractTTN; root_vertex=default_root_vertex(x, A, y)
+  y::AbstractTTN,
+  A::AbstractTTN,
+  x::AbstractTTN;
+  root_vertex=GraphsExtensions.default_root_vertex(x),
 )
   traversal_order = reverse(post_order_dfs_vertices(x, root_vertex))
   ydag = sim(dag(y); sites=[])
@@ -319,7 +324,7 @@ function ITensors.inner(
   y::AbstractTTN,
   A::AbstractTTN,
   x::AbstractTTN;
-  root_vertex=default_root_vertex(B, y, A, x),
+  root_vertex=GraphsExtensions.default_root_vertex(B),
 )
   N = nv(B)
   if nv(y) != N || nv(x) != N || nv(A) != N
@@ -348,8 +353,8 @@ function ITensorMPS.expect(
   operator::String,
   state::AbstractTTN;
   vertices=vertices(state),
-  # ToDo: verify that this is a sane default
-  root_vertex=default_root_vertex(siteinds(state)),
+  # TODO: verify that this is a sane default
+  root_vertex=GraphsExtensions.default_root_vertex(state),
 )
   # TODO: Optimize this with proper caching.
   state /= norm(state)
diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl
index 3c4380f2..c54d3b01 100644
--- a/src/treetensornetworks/opsum_to_ttn.jl
+++ b/src/treetensornetworks/opsum_to_ttn.jl
@@ -4,9 +4,10 @@ using ITensors.ITensorMPS: ITensorMPS, cutoff, linkdims, truncate!
 using ITensors.LazyApply: Prod, Sum, coefficient
 using ITensors.NDTensors: Block, blockdim, maxdim, nblocks, nnzblocks
 using ITensors.Ops: Op, OpSum
-using NamedGraphs: degrees, is_leaf, vertex_path
+using NamedGraphs.GraphsExtensions:
+  GraphsExtensions, boundary_edges, degrees, is_leaf_vertex, vertex_path
 using StaticArrays: MVector
-using NamedGraphs: boundary_edges
+
 # convert ITensors.OpSum to TreeTensorNetwork
 #
@@ -61,13 +62,18 @@ function ttn_svd(
 
   # traverse tree outwards from root vertex
   vs = _default_vertex_ordering(sites, root_vertex)
-  # ToDo: Add check in ttn_svd that the ordering matches that of find_index_in_tree, which is used in sorteachterm #fermion-sign!
-  es = _default_edge_ordering(sites, root_vertex) # store edges in fixed ordering relative to root
+  # TODO: Add check in ttn_svd that the ordering matches that of find_index_in_tree, which is used in sorteachterm #fermion-sign!
+  # store edges in fixed ordering relative to root
+  es = _default_edge_ordering(sites, root_vertex)
   # some things to keep track of
-  degrees = Dict(v => degree(sites, v) for v in vs) # rank of every TTN tensor in network
-  Vs = Dict(e => Dict{QN,Matrix{coefficient_type}}() for e in es) # link isometries for SVD compression of TTN
-  inmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}() # map from term in Hamiltonian to incoming channel index for every edge
-  outmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}() # map from term in Hamiltonian to outgoing channel index for every edge
+  # rank of every TTN tensor in network
+  degrees = Dict(v => degree(sites, v) for v in vs)
+  # link isometries for SVD compression of TTN
+  Vs = Dict(e => Dict{QN,Matrix{coefficient_type}}() for e in es)
+  # map from term in Hamiltonian to incoming channel index for every edge
+  inmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}()
+  # map from term in Hamiltonian to outgoing channel index for every edge
+  outmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}()
 
   op_cache = Dict{Pair{String,vertextype_sites},ITensor}()
@@ -95,14 +101,16 @@ function ttn_svd(
     is_internal[v] = isempty(sites[v])
     if isempty(sites[v])
       # FIXME: This logic only works for trivial flux, breaks for nonzero flux
-      # ToDo: add assert or fix and add test!
+      # TODO: add assert or fix and add test!
       sites[v] = [Index(Hflux => 1)]
     end
   end
+  # bond coefficients for incoming edge channels
   inbond_coefs = Dict(
     e => Dict{QN,Vector{ITensorMPS.MatElem{coefficient_type}}}() for e in es
-  ) # bond coefficients for incoming edge channels
-  site_coef_done = Prod{Op}[] # list of terms for which the coefficient has been added to a site factor
+  )
+  # list of terms for which the coefficient has been added to a site factor
+  site_coef_done = Prod{Op}[]
   # temporary symbolic representation of TTN Hamiltonian
   tempTTN = Dict(v => QNArrElem{Scaled{coefficient_type,Prod{Op}},degrees[v]}[] for v in vs)
@@ -132,7 +140,7 @@ function ttn_svd(
 
     # sanity check, leaves only have single incoming or outgoing edge
     @assert !isempty(dims_out) || !isnothing(dim_in)
-    (isempty(dims_out) || isnothing(dim_in)) && @assert is_leaf(sites, v)
+    (isempty(dims_out) || isnothing(dim_in)) && @assert is_leaf_vertex(sites, v)
 
     for term in os
       # loop over OpSum and pick out terms that act on current vertex
@@ -202,7 +210,8 @@ function ttn_svd(
           coutmap = get!(
             outmaps, edges[dout] => outgoing_qns[edges[dout]], Dict{Vector{Op},Int}()
           )
-          T_inds[dout] = ITensorMPS.posInLink!(coutmap, outgoing[edges[dout]]) # add outgoing channel
+          # add outgoing channel
+          T_inds[dout] = ITensorMPS.posInLink!(coutmap, outgoing[edges[dout]])
          T_qns[dout] = outgoing_qns[edges[dout]]
         end
         # if term starts at this site, add its coefficient as a site factor
@@ -210,7 +219,8 @@ function ttn_svd(
         if (isnothing(dim_in) || T_inds[dim_in] == -1) &&
           ITensors.argument(term) ∉ site_coef_done
           site_coef = ITensors.coefficient(term)
-          site_coef = convert(coefficient_type, site_coef) # required since ITensors.coefficient seems to return ComplexF64 even if coefficient_type is determined to be real
+          # required since ITensors.coefficient seems to return ComplexF64 even if coefficient_type is determined to be real
+          site_coef = convert(coefficient_type, site_coef)
           push!(site_coef_done, ITensors.argument(term))
         end
         # add onsite identity for interactions passing through vertex
@@ -267,7 +277,7 @@ function ttn_svd(
 
   for v in vs
     # redo the whole thing like before
-    # ToDo: use neighborhood instead of going through all edges, see above
+    # TODO: use neighborhood instead of going through all edges, see above
     edges = align_and_reorder_edges(incident_edges(sites, v), es)
     dim_in = findfirst(e -> dst(e) == v, edges)
     dims_out = findall(e -> src(e) == v, edges)
@@ -336,7 +346,8 @@ function ttn_svd(
 
     for ((b, q_op), m) in blocks
       Op = computeSiteProd(sites, Prod(q_op))
-      if hasqns(Op) # FIXME: this may not be safe, we may want to check for the equivalent (zero tensor?) case in the dense case as well
+      if hasqns(Op)
+        # FIXME: this may not be safe, we may want to check for the equivalent (zero tensor?) case in the dense case as well
         iszero(nnzblocks(Op)) && continue
       end
       sq = flux(Op)
@@ -362,7 +373,7 @@ function ttn_svd(
       if is_internal[v]
        H[v] += iT
       else
-        #ToDo: Remove this assert since it seems to be costly
+        #TODO: Remove this assert since it seems to be costly
        #if hasqns(iT)
        #  @assert flux(iT * Op) == Hflux
        #end
@@ -374,18 +385,22 @@ function ttn_svd(
     # add starting and ending identity operators
     idT = zeros(coefficient_type, linkdims...)
     if isnothing(dim_in)
-      idT[ones(Int, degrees[v])...] = 1.0 # only one real starting identity
+      # only one real starting identity
+      idT[ones(Int, degrees[v])...] = 1.0
     end
     # ending identities are a little more involved
     if !isnothing(dim_in)
-      idT[linkdims...] = 1.0 # place identity if all channels end
+      # place identity if all channels end
+      idT[linkdims...] = 1.0
       # place identity from start of incoming channel to start of each single outgoing channel, and end all other channels
       idT_end_inds = [linkdims...]
-      idT_end_inds[dim_in] = 1 #this should really be an int
+      #this should really be an int
+      idT_end_inds[dim_in] = 1
       for dout in dims_out
        idT_end_inds[dout] = 1
        idT[idT_end_inds...] = 1.0
-        idT_end_inds[dout] = linkdims[dout] # reset
+        # reset
+        idT_end_inds[dout] = linkdims[dout]
      end
    end
    T = itensor(idT, _linkinds)
@@ -522,29 +537,17 @@ Convert an OpSum object `os` to a TreeTensorNetwork, with indices given by `sites`
 function ttn(
   os::OpSum,
   sites::IndsNetwork;
-  root_vertex=default_root_vertex(sites),
-  splitblocks=false,
-  algorithm="svd",
+  root_vertex=GraphsExtensions.default_root_vertex(sites),
   kwargs...,
-)::TTN
+)
   length(ITensors.terms(os)) == 0 && error("OpSum has no terms")
   is_tree(sites) || error("Site index graph must be a tree.")
-  is_leaf(sites, root_vertex) || error("Tree root must be a leaf vertex.")
+  is_leaf_vertex(sites, root_vertex) || error("Tree root must be a leaf vertex.")
 
   os = deepcopy(os)
   os = sorteachterm(os, sites, root_vertex)
-  os = ITensorMPS.sortmergeterms(os) # not exported
-  if algorithm == "svd"
-    T = ttn_svd(os, sites, root_vertex; kwargs...)
-  else
-    error("Currently only SVD is supported as TTN constructor backend.")
-  end
-
-  if splitblocks
-    error("splitblocks not yet implemented for AbstractTreeTensorNetwork.")
-    T = ITensors.splitblocks(linkinds, T) # TODO: make this work
-  end
-  return T
+  os = ITensorMPS.sortmergeterms(os)
+  return ttn_svd(os, sites, root_vertex; kwargs...)
 end
 
 function mpo(os::OpSum, external_inds::Vector; kwargs...)
diff --git a/src/treetensornetworks/projttns/abstractprojttn.jl b/src/treetensornetworks/projttns/abstractprojttn.jl
index d86cc48a..040f7f56 100644
--- a/src/treetensornetworks/projttns/abstractprojttn.jl
+++ b/src/treetensornetworks/projttns/abstractprojttn.jl
@@ -2,7 +2,8 @@ using DataGraphs: DataGraphs, underlying_graph
 using Graphs: neighbors
 using ITensors: ITensor, contract, order, product
 using ITensors.ITensorMPS: ITensorMPS, nsite
-using NamedGraphs: NamedGraphs, NamedEdge, incident_edges, vertextype
+using NamedGraphs: NamedGraphs, NamedEdge, vertextype
+using NamedGraphs.GraphsExtensions: incident_edges
 
 abstract type AbstractProjTTN{V} end
 
@@ -35,7 +36,7 @@ function sites(P::AbstractProjTTN{V}) where {V}
   return pos(P)
 end
 
-function NamedGraphs.incident_edges(P::AbstractProjTTN{V})::Vector{NamedEdge{V}} where {V}
+function NamedGraphs.incident_edges(P::AbstractProjTTN{V}) where {V}
   on_edge(P) && return [pos(P), reverse(pos(P))]
   edges = [
     [edgetype(P)(n => v) for n in setdiff(neighbors(underlying_graph(P), v), sites(P))] for
@@ -44,7 +45,7 @@ function NamedGraphs.incident_edges(P::AbstractProjTTN{V})::Vector{NamedEdge{V}}
   return collect(Base.Iterators.flatten(edges))
 end
 
-function internal_edges(P::AbstractProjTTN{V})::Vector{NamedEdge{V}} where {V}
+function internal_edges(P::AbstractProjTTN{V}) where {V}
   on_edge(P) && return edgetype(P)[]
   edges = [
     [edgetype(P)(v => n) for n in neighbors(underlying_graph(P), v) ∩ sites(P)] for
diff --git a/src/treetensornetworks/projttns/projouterprodttn.jl b/src/treetensornetworks/projttns/projouterprodttn.jl
index 20caf093..f3e50900 100644
--- a/src/treetensornetworks/projttns/projouterprodttn.jl
+++ b/src/treetensornetworks/projttns/projouterprodttn.jl
@@ -1,7 +1,7 @@
 using DataGraphs: DataGraphs
 using Dictionaries: set!
 using ITensors: ITensor
-using NamedGraphs: incident_edges
+using NamedGraphs.GraphsExtensions: incident_edges, is_leaf_vertex
 
 struct ProjOuterProdTTN{V} <: AbstractProjTTN{V}
   pos::Union{Vector{<:V},NamedEdge{V}}
@@ -54,7 +54,7 @@ function make_environment(P::ProjOuterProdTTN, state::AbstractTTN, e::AbstractEdge)
   reverse(e) ∈ incident_edges(P) || (P = invalidate_environment(P, reverse(e)))
   # do nothing if valid environment already present
   if !haskey(environments(P), e)
-    if is_leaf(underlying_graph(P), src(e))
+    if is_leaf_vertex(underlying_graph(P), src(e))
       # leaves are easy
       env = internal_state(P)[src(e)] * operator(P)[src(e)] * dag(state[src(e)])
     else
diff --git a/src/treetensornetworks/projttns/projttn.jl b/src/treetensornetworks/projttns/projttn.jl
index 06714feb..f0e5d90d 100644
--- a/src/treetensornetworks/projttns/projttn.jl
+++ b/src/treetensornetworks/projttns/projttn.jl
@@ -1,18 +1,23 @@
 using DataGraphs: DataGraphs, underlying_graph
-using Dictionaries: Dictionary
+using Dictionaries: Dictionary, Indices
 using Graphs: edgetype, vertices
 using ITensors: ITensor
-using NamedGraphs: NamedEdge, incident_edges
+using NamedGraphs: NamedEdge
+using NamedGraphs.GraphsExtensions: incident_edges, is_leaf_vertex
 
 """
 ProjTTN
 """
-struct ProjTTN{V} <: AbstractProjTTN{V}
-  pos::Union{Vector{<:V},NamedEdge{V}} # TODO: cleanest way to specify effective Hamiltonian position?
+struct ProjTTN{V,Pos<:Union{Indices{V},NamedEdge{V}}} <: AbstractProjTTN{V}
+  pos::Pos
   operator::TTN{V}
   environments::Dictionary{NamedEdge{V},ITensor}
 end
 
+function ProjTTN(pos::Vector, operator::TTN, environments::Dictionary)
+  return ProjTTN(Indices(pos), operator, environments)
+end
+
 function ProjTTN(operator::TTN)
   return ProjTTN(vertices(operator), operator, Dictionary{edgetype(operator),ITensor}())
 end
@@ -47,7 +52,7 @@ function make_environment(P::ProjTTN, state::AbstractTTN, e::AbstractEdge)
   reverse(e) ∈ incident_edges(P) || (P = invalidate_environment(P, reverse(e)))
   # do nothing if valid environment already present
   if !haskey(environments(P), e)
-    if is_leaf(underlying_graph(P), src(e))
+    if is_leaf_vertex(underlying_graph(P), src(e))
       # leaves are easy
       env = state[src(e)] * operator(P)[src(e)] * dag(prime(state[src(e)]))
     else
diff --git a/src/treetensornetworks/projttns/projttnsum.jl b/src/treetensornetworks/projttns/projttnsum.jl
index 73b87af8..42ae6a05 100644
--- a/src/treetensornetworks/projttns/projttnsum.jl
+++ b/src/treetensornetworks/projttns/projttnsum.jl
@@ -1,6 +1,7 @@
 using ITensors: ITensors, contract, product
 using ITensors.LazyApply: LazyApply, terms
-using NamedGraphs: NamedGraphs, incident_edges
+using NamedGraphs: NamedGraphs
+using NamedGraphs.GraphsExtensions: incident_edges
 
 """
 ProjTTNSum
diff --git a/src/treetensornetworks/ttn.jl b/src/treetensornetworks/treetensornetwork.jl
similarity index 83%
rename from src/treetensornetworks/ttn.jl
rename to src/treetensornetworks/treetensornetwork.jl
index a30c0776..405844fb 100644
--- a/src/treetensornetworks/ttn.jl
+++ b/src/treetensornetworks/treetensornetwork.jl
@@ -1,21 +1,27 @@
+using Dictionaries: Indices
 using Graphs: path_graph
 using ITensors: ITensor
 using LinearAlgebra: factorize, normalize
-using NamedGraphs: vertextype
+using NamedGraphs.GraphsExtensions: GraphsExtensions, vertextype
 
 """
     TreeTensorNetwork{V} <: AbstractTreeTensorNetwork{V}
 """
 struct TreeTensorNetwork{V} <: AbstractTreeTensorNetwork{V}
   tensornetwork::ITensorNetwork{V}
-  ortho_region::Vector{V}
-  global function _TreeTensorNetwork(tensornetwork::ITensorNetwork, ortho_region)
+  ortho_region::Indices{V}
+  global function _TreeTensorNetwork(tensornetwork::ITensorNetwork, ortho_region::Indices)
    @assert is_tree(tensornetwork)
    return new{vertextype(tensornetwork)}(tensornetwork, ortho_region)
   end
-  global function _TreeTensorNetwork(tensornetwork::ITensorNetwork)
-    return _TreeTensorNetwork(tensornetwork, vertices(tensornetwork))
-  end
+end
+
+function _TreeTensorNetwork(tensornetwork::ITensorNetwork, ortho_region::Vector)
+  return _TreeTensorNetwork(tensornetwork, Indices(ortho_region))
+end
+
+function _TreeTensorNetwork(tensornetwork::ITensorNetwork)
+  return _TreeTensorNetwork(tensornetwork, vertices(tensornetwork))
 end
 
 function TreeTensorNetwork(tn::ITensorNetwork; ortho_region=vertices(tn))
@@ -72,7 +78,12 @@ function mps(f, is::Vector{<:Index}; kwargs...)
 end
 
 # Construct from dense ITensor, using IndsNetwork of site indices.
-function ttn(a::ITensor, is::IndsNetwork; ortho_region=[default_root_vertex(is)], kwargs...)
+function ttn(
+  a::ITensor,
+  is::IndsNetwork;
+  ortho_region=Indices([GraphsExtensions.default_root_vertex(is)]),
+  kwargs...,
+)
   for v in vertices(is)
    @assert hasinds(a, is[v])
   end
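With `ortho_region` now stored as `Indices`, a `Vector` still works through the convert method above; a short sketch of the container's semantics:

```julia
# Sketch: `Indices` is the set-like Dictionaries.jl container now used for `ortho_region`.
using Dictionaries: Indices
ortho_region = Indices([(1, 1)])  # what `_TreeTensorNetwork` stores internally
(1, 1) in ortho_region            # true; membership tests are the primary operation
collect(ortho_region)             # [(1, 1)] when a Vector view is needed
```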
diff --git a/src/usings.jl b/src/usings.jl
deleted file mode 100644
index c06e62ba..00000000
--- a/src/usings.jl
+++ /dev/null
@@ -1 +0,0 @@
-using SimpleTraits: SimpleTraits
diff --git a/src/utility.jl b/src/utility.jl
deleted file mode 100644
index 0a4150a9..00000000
--- a/src/utility.jl
+++ /dev/null
@@ -1,18 +0,0 @@
-using ITensors: OpSum
-"""
-Relabel sites in OpSum according to given site map
-"""
-function relabel_sites(O::OpSum, vmap::AbstractDictionary)
-  Oout = OpSum()
-  for term in Ops.terms(O)
-    c = Ops.coefficient(term)
-    p = Ops.argument(term)
-    # swap sites for every Op in product and multiply resulting Ops
-    pout = prod([
-      Op(Ops.which_op(o), map(v -> vmap[v], Ops.sites(o))...; Ops.params(o)...) for o in p
-    ])
-    # add to new OpSum
-    Oout += c * pout
-  end
-  return Oout
-end
diff --git a/src/utils.jl b/src/utils.jl
index f4293b67..c1e1cde0 100644
--- a/src/utils.jl
+++ b/src/utils.jl
@@ -1,16 +1,9 @@
 using Dictionaries: getindices
 
-to_tuple(x) = (x,)
-to_tuple(x::Tuple) = x
-
 function cartesian_to_linear(dims::Tuple)
   return Dictionary(vec(Tuple.(CartesianIndices(dims))), 1:prod(dims))
 end
 
-# Convert to real if possible
-maybe_real(x::Real) = x
-maybe_real(x::Complex) = iszero(imag(x)) ? real(x) : x
-
 front(itr, n=1) = Iterators.take(itr, length(itr) - n)
 tail(itr) = Iterators.drop(itr, 1)
diff --git a/src/visualize.jl b/src/visualize.jl
index 5c0b17ff..ad814cf3 100644
--- a/src/visualize.jl
+++ b/src/visualize.jl
@@ -1,3 +1,4 @@
+# TODO: Move to `ITensorNetworksITensors.ITensorVisualizationCoreExt`.
 using DataGraphs: AbstractDataGraph, underlying_graph
 using Graphs: vertices
 using ITensors.ITensorVisualizationCore: ITensorVisualizationCore, visualize
diff --git a/test/test_abstractgraph.jl b/test/test_abstractgraph.jl
index d44ee85c..2a67af60 100644
--- a/test/test_abstractgraph.jl
+++ b/test/test_abstractgraph.jl
@@ -1,18 +1,19 @@
 @eval module $(gensym())
-using NamedGraphs: add_edge!, add_vertex!, NamedDiGraph
-using ITensorNetworks: _root, _is_rooted, _is_rooted_directed_binary_tree
+using Graphs: add_edge!, add_vertex!
+using NamedGraphs: NamedDiGraph
+using NamedGraphs.GraphsExtensions: root_vertex, is_rooted, is_binary_arborescence
 using Test: @test, @testset
 
 @testset "test rooted directed graphs" begin
   g = NamedDiGraph([1, 2, 3])
-  @test !_is_rooted(g)
+  @test !is_rooted(g)
   add_edge!(g, 1, 2)
   add_edge!(g, 1, 3)
-  @test _is_rooted(g)
-  @test _root(g) == 1
-  @test _is_rooted_directed_binary_tree(g)
+  @test is_rooted(g)
+  @test root_vertex(g) == 1
+  @test is_binary_arborescence(g)
   add_vertex!(g, 4)
   add_edge!(g, 1, 4)
-  @test !_is_rooted_directed_binary_tree(g)
+  @test !is_binary_arborescence(g)
 end
 end
diff --git a/test/test_additensornetworks.jl b/test/test_additensornetworks.jl
index 279f3b2f..88b12a1d 100644
--- a/test/test_additensornetworks.jl
+++ b/test/test_additensornetworks.jl
@@ -1,6 +1,7 @@
 @eval module $(gensym())
 using Graphs: rem_edge!, vertices
-using NamedGraphs: NamedEdge, hexagonal_lattice_graph, named_grid
+using NamedGraphs: NamedEdge
+using NamedGraphs.NamedGraphGenerators: named_grid
 using ITensorNetworks: ITensorNetwork, inner_network, random_tensornetwork, siteinds
 using ITensors: ITensors, apply, op, scalar, inner
 using LinearAlgebra: norm_sqr
diff --git a/test/test_apply.jl b/test/test_apply.jl
index fab04ceb..ef0a8fdc 100644
--- a/test/test_apply.jl
+++ b/test/test_apply.jl
@@ -12,7 +12,8 @@ using ITensorNetworks:
   siteinds,
   update
 using ITensors: ITensors, inner, op
-using NamedGraphs: PartitionVertex, named_grid
+using NamedGraphs.NamedGraphGenerators: named_grid
+using NamedGraphs.PartitionedGraphs: PartitionVertex
 using Random: Random
 using SplitApplyCombine: group
 using Test: @test, @testset
diff --git a/test/test_belief_propagation.jl b/test/test_belief_propagation.jl
index 20b73fdc..275d5b91 100644
--- a/test/test_belief_propagation.jl
+++ b/test/test_belief_propagation.jl
@@ -12,6 +12,7 @@ using ITensorNetworks:
   contract,
   contract_boundary_mps,
   contraction_sequence,
+  eachtensor,
   environment,
   flatten_networks,
   linkinds_combiners,
@@ -25,7 +26,9 @@ using ITensors: ITensors, ITensor, combiner, dag, inds, inner, op, prime, random
 using ITensorNetworks.ModelNetworks: ModelNetworks
 using ITensors.NDTensors: array
 using LinearAlgebra: eigvals, tr
-using NamedGraphs: NamedEdge, PartitionVertex, named_comb_tree, named_grid
+using NamedGraphs: NamedEdge
+using NamedGraphs.NamedGraphGenerators: named_comb_tree, named_grid
+using NamedGraphs.PartitionedGraphs: PartitionVertex
 using Random: Random
 using SplitApplyCombine: group
 using Test: @test, @testset
@@ -150,8 +153,9 @@ using Test: @test, @testset
   ψOψ = combine_linkinds(ψOψ, combiners)
 
   bpc = BeliefPropagationCache(ψψ, group(v -> v[1], vertices(ψψ)))
-  message_update_func(tns; kwargs...) =
-    Vector{ITensor}(first(contract(ITensorNetwork(tns); alg="density_matrix", kwargs...)))
+  message_update_func(tns; kwargs...) = collect(
+    eachtensor(first(contract(ITensorNetwork(tns); alg="density_matrix", kwargs...)))
+  )
   bpc = update(
     bpc; message_update=message_update_func, message_update_kwargs=(; cutoff=1e-6, maxdim=4)
   )
+ par = _contract_deltas_ignore_leaf_partitions(par; root=root_vertex(inds_btree)) + networks = map(v -> par[v], vertices(par)) + network2 = reduce(union, networks) + out2 = contract(network2) @test isapprox(out1, out2) # test approx_itensornetwork (here we call `contract` to test the interface) for structure in [path_graph_structure, binary_tree_structure] for alg in ["density_matrix", "ttn_svd"] approx_tn, lognorm = contract( - tn; alg=alg, output_structure=structure, contraction_sequence_alg="sa_bipartite" + tn; + alg=alg, + output_structure=structure, + contraction_sequence_kwargs=(; alg="sa_bipartite"), ) - network3 = Vector{ITensor}(approx_tn) + network3 = collect(eachtensor(approx_tn)) out3 = contract(network3...) * exp(lognorm) i1 = noncommoninds(network...) i3 = noncommoninds(network3...) @@ -128,21 +132,20 @@ end underlying_tree = underlying_graph(input_partition) # Change type of each partition[v] since they will be updated # with potential data type chage. - p = DataGraph() + p = DataGraph(NamedGraph()) for v in vertices(input_partition) add_vertex!(p, v) p[v] = ITensorNetwork{Any}(input_partition[v]) end - alg_graph = _DensityMartrixAlgGraph(p, underlying_tree, _root(out_tree)) - path = post_order_dfs_vertices(underlying_tree, _root(out_tree)) + alg_graph = _DensityMartrixAlgGraph(p, underlying_tree, root_vertex(out_tree)) + path = post_order_dfs_vertices(underlying_tree, root_vertex(out_tree)) for v in path[1:2] _rem_vertex!( alg_graph, v; cutoff=1e-15, maxdim=10000, - contraction_sequence_alg="optimal", - contraction_sequence_kwargs=(;), + contraction_sequence_kwargs=(; alg="optimal"), ) end # Check that a specific density matrix info has been cached diff --git a/test/test_contract_deltas.jl b/test/test_contract_deltas.jl index 8b7add9b..24ef9bc4 100644 --- a/test/test_contract_deltas.jl +++ b/test/test_contract_deltas.jl @@ -2,17 +2,19 @@ using Graphs: dfs_tree, nv, vertices using ITensors: Index, ITensor, delta, noncommoninds, randomITensor using ITensorNetworks: + IndsNetwork, + ITensorNetwork, _contract_deltas, _contract_deltas_ignore_leaf_partitions, _noncommoninds, _partition, - _root, binary_tree_structure, - IndsNetwork, - ITensorNetwork, + eachtensor, + flatten_siteinds, path_graph_structure, random_tensornetwork -using NamedGraphs: leaf_vertices, named_grid +using NamedGraphs.GraphsExtensions: leaf_vertices, root_vertex +using NamedGraphs.NamedGraphGenerators: named_grid using Test: @test, @testset @testset "test _contract_deltas with no deltas" begin @@ -31,8 +33,7 @@ end tn = ITensorNetwork([a, b, delta1, delta2]) tn2 = _contract_deltas(tn) @test nv(tn2) == 3 - @test Set(noncommoninds(Vector{ITensor}(tn)...)) == - Set(noncommoninds(Vector{ITensor}(tn2)...)) + @test issetequal(flatten_siteinds(tn), flatten_siteinds(tn2)) end @testset "test _contract_deltas over partition" begin @@ -46,7 +47,7 @@ end tn = ITensorNetwork(vec(tn[:, :, 1])) for inds_tree in [binary_tree_structure(tn), path_graph_structure(tn)] par = _partition(tn, inds_tree; alg="mincut_recursive_bisection") - root = _root(inds_tree) + root = root_vertex(inds_tree) par_contract_deltas = _contract_deltas_ignore_leaf_partitions(par; root=root) @test Set(_noncommoninds(par)) == Set(_noncommoninds(par_contract_deltas)) leaves = leaf_vertices(dfs_tree(par_contract_deltas, root)) diff --git a/test/test_contraction_sequence.jl b/test/test_contraction_sequence.jl index 5373002d..e0fab70c 100644 --- a/test/test_contraction_sequence.jl +++ b/test/test_contraction_sequence.jl @@ -3,7 +3,7 @@ using EinExprs: 
Exhaustive, Greedy, HyPar using ITensorNetworks: contraction_sequence, norm_sqr_network, random_tensornetwork, siteinds using ITensors: ITensors, contract -using NamedGraphs: named_grid +using NamedGraphs.NamedGraphGenerators: named_grid using OMEinsumContractionOrders: OMEinsumContractionOrders using Random: Random using Test: @test, @testset diff --git a/test/test_contraction_sequence_to_graph.jl b/test/test_contraction_sequence_to_graph.jl index 4825d29c..5e093dca 100644 --- a/test/test_contraction_sequence_to_graph.jl +++ b/test/test_contraction_sequence_to_graph.jl @@ -1,19 +1,17 @@ @eval module $(gensym()) using Graphs: vertices using ITensorNetworks: - _root, contraction_sequence, contraction_sequence_to_digraph, contraction_sequence_to_graph, - internal_edges, contraction_tree_leaf_bipartition, - distance_to_leaf, flatten_networks, - leaf_vertices, random_tensornetwork, siteinds using Test: @test, @testset -using NamedGraphs: is_leaf, leaf_vertices, named_grid +using NamedGraphs.GraphsExtensions: + is_leaf_vertex, leaf_vertices, non_leaf_edges, root_vertex +using NamedGraphs.NamedGraphGenerators: named_grid @testset "contraction_sequence_to_graph" begin n = 3 @@ -30,20 +28,20 @@ using NamedGraphs: is_leaf, leaf_vertices, named_grid g_seq_leaves = leaf_vertices(g_directed_seq) @test length(g_seq_leaves) == n * n @test 2 * length(g_seq_leaves) - 1 == length(vertices(g_directed_seq)) - @test _root(g_directed_seq)[3] == [] + @test root_vertex(g_directed_seq)[3] == [] g_seq = contraction_sequence_to_graph(seq) @test length(g_seq_leaves) == n * n @test 2 * length(g_seq_leaves) - 2 == length(vertices(g_seq)) - for eb in internal_edges(g_seq) + for eb in non_leaf_edges(g_seq) vs = contraction_tree_leaf_bipartition(g_seq, eb) @test length(vs) == 2 @test Set([v.I for v in vcat(vs[1], vs[2])]) == Set(vertices(ψψ)) end #Check all internal vertices define a correct tripartition and all leaf vertices define a bipartition (tensor on that leafs vs tensor on rest of tree) for v in vertices(g_seq) - if (!is_leaf(g_seq, v)) + if (!is_leaf_vertex(g_seq, v)) @test length(v) == 3 @test Set([vsi.I for vsi in vcat(v[1], v[2], v[3])]) == Set(vertices(ψψ)) else diff --git a/test/test_forms.jl b/test/test_forms.jl index e6cda5cd..e0edb597 100644 --- a/test/test_forms.jl +++ b/test/test_forms.jl @@ -1,7 +1,7 @@ @eval module $(gensym()) using DataGraphs: underlying_graph using Graphs: nv -using NamedGraphs +using NamedGraphs.NamedGraphGenerators: named_grid using ITensorNetworks: BeliefPropagationCache, BilinearFormNetwork, diff --git a/test/test_gauging.jl b/test/test_gauging.jl index 1c7bff7d..2c8b6f8a 100644 --- a/test/test_gauging.jl +++ b/test/test_gauging.jl @@ -12,7 +12,7 @@ using ITensorNetworks: using ITensors: diagITensor, inds, inner using ITensors.NDTensors: vector using LinearAlgebra: diag -using NamedGraphs: named_grid +using NamedGraphs.NamedGraphGenerators: named_grid using Random: Random using Test: @test, @testset diff --git a/test/test_indsnetwork.jl b/test/test_indsnetwork.jl index 30662263..acfee2e6 100644 --- a/test/test_indsnetwork.jl +++ b/test/test_indsnetwork.jl @@ -5,7 +5,7 @@ using Graphs: edges, ne, nv, vertices using ITensorNetworks: IndsNetwork, union_all_inds using ITensors: Index using ITensors.NDTensors: dim -using NamedGraphs: named_comb_tree +using NamedGraphs.NamedGraphGenerators: named_comb_tree using Random: Random using Test: @test, @testset diff --git a/test/test_itensornetwork.jl b/test/test_itensornetwork.jl index 7012a1b5..36d29657 100644 --- 
diff --git a/test/test_itensornetwork.jl b/test/test_itensornetwork.jl
index 7012a1b5..36d29657 100644
--- a/test/test_itensornetwork.jl
+++ b/test/test_itensornetwork.jl
@@ -53,7 +53,10 @@ using ITensorNetworks:
   siteinds,
   ttn
 using LinearAlgebra: factorize
-using NamedGraphs: NamedEdge, incident_edges, named_comb_tree, named_grid
+using NamedGraphs: NamedEdge
+using NamedGraphs.GraphsExtensions: incident_edges
+using NamedGraphs.NamedGraphGenerators: named_comb_tree, named_grid
+using NDTensors: NDTensors, dim
 using Random: Random, randn!
 using Test: @test, @test_broken, @testset
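A second recurring change, visible in the `test_opsum_to_ttn.jl` diff below: `vertices` now returns a `Dictionaries.Indices` rather than a `Vector`, so positional indexing (and `shuffle`) first materializes the vertices with `collect`. A minimal sketch:

```julia
using Graphs: vertices
using NamedGraphs.NamedGraphGenerators: named_comb_tree

c = named_comb_tree((3, 2))
vs = vertices(c)       # a `Dictionaries.Indices`, not a `Vector`
vs_vec = collect(vs)   # materialize before positional indexing
reordered = vs_vec[[2, 1, 3, 4, 5, 6]]
v1 = first(vs)         # `first` still works directly on the `Indices`
```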
diff --git a/test/test_opsum_to_ttn.jl b/test/test_opsum_to_ttn.jl
index c99b4adf..2bcbd556 100644
--- a/test/test_opsum_to_ttn.jl
+++ b/test/test_opsum_to_ttn.jl
@@ -1,6 +1,6 @@
 @eval module $(gensym())
 using DataGraphs: vertex_data
-using Dictionaries: Dictionary
+using Dictionaries: Dictionary, getindices
 using Graphs: add_vertex!, rem_vertex!, add_edge!, rem_edge!, vertices
 using ITensors:
   ITensors,
@@ -16,11 +16,13 @@ using ITensors:
 using ITensors.ITensorMPS: ITensorMPS
 using ITensors.NDTensors: matrix
 using ITensorGaussianMPS: ITensorGaussianMPS
-using ITensorNetworks: ITensorNetworks, OpSum, ttn, relabel_sites, siteinds
+using ITensorNetworks: ITensorNetworks, OpSum, ttn, siteinds
+using ITensorNetworks.ITensorsExtensions: replace_vertices
 using ITensorNetworks.ModelHamiltonians: ModelHamiltonians
 using KrylovKit: eigsolve
 using LinearAlgebra: eigvals, norm
-using NamedGraphs: leaf_vertices, named_comb_tree, named_grid, post_order_dfs_vertices
+using NamedGraphs.GraphsExtensions: leaf_vertices, post_order_dfs_vertices
+using NamedGraphs.NamedGraphGenerators: named_comb_tree, named_grid
 using Test: @test, @test_broken, @testset

 function to_matrix(t::ITensor)
@@ -34,7 +36,6 @@ end
 @testset "OpSum to TTN" begin
   # small comb tree
   auto_fermion_enabled = ITensors.using_auto_fermion()
-  ITensors.disable_auto_fermion() # ToDo: remove when autofermion incompatibility with no QNs is fixed
   tooth_lengths = fill(2, 3)
   c = named_comb_tree(tooth_lengths)
@@ -42,7 +43,7 @@ end
   # linearized version
   linear_order = [4, 1, 2, 5, 3, 6]
-  vmap = Dictionary(vertices(is)[linear_order], 1:length(linear_order))
+  vmap = Dictionary(collect(vertices(is))[linear_order], eachindex(linear_order))
   sites = only.(collect(vertex_data(is)))[linear_order]

   # test with next-to-nearest-neighbor Ising Hamiltonian
@@ -62,9 +63,9 @@
   @testset "Svd approach" for root_vertex in leaf_vertices(is)
     # get TTN Hamiltonian directly
-    Hsvd = ttn(H, is; root_vertex=root_vertex, cutoff=1e-10)
+    Hsvd = ttn(H, is; root_vertex, cutoff=1e-10)
     # get corresponding MPO Hamiltonian
-    Hline = ITensorMPS.MPO(relabel_sites(H, vmap), sites)
+    Hline = ITensorMPS.MPO(replace_vertices(v -> vmap[v], H), sites)
     # compare resulting dense Hamiltonians
     @disable_warn_order begin
       Tttno = prod(Hline)
@@ -72,9 +73,8 @@ end
     end
     @test Tttno ≈ Tmpo rtol = 1e-6

-    # this breaks for longer range interactions
-    Hsvd_lr = ttn(Hlr, is; root_vertex=root_vertex, algorithm="svd", cutoff=1e-10)
-    Hline_lr = ITensorMPS.MPO(relabel_sites(Hlr, vmap), sites)
+    Hsvd_lr = ttn(Hlr, is; root_vertex, cutoff=1e-10)
+    Hline_lr = ITensorMPS.MPO(replace_vertices(v -> vmap[v], Hlr), sites)
     @disable_warn_order begin
       Tttno_lr = prod(Hline_lr)
       Tmpo_lr = contract(Hsvd_lr)
@@ -88,7 +88,9 @@ end
 @testset "Multiple onsite terms (regression test for issue #62)" begin
   auto_fermion_enabled = ITensors.using_auto_fermion()
-  ITensors.disable_auto_fermion() # ToDo: remove when autofermion incompatibility with no QNs is fixed
+  if !auto_fermion_enabled
+    ITensors.enable_auto_fermion()
+  end
   grid_dims = (2, 1)
   g = named_grid(grid_dims)
   s = siteinds("S=1/2", g)
@@ -119,7 +121,7 @@ end
   # linearized version
   linear_order = [4, 1, 2, 5, 3, 6]
-  vmap = Dictionary(vertices(is)[linear_order], 1:length(linear_order))
+  vmap = Dictionary(collect(vertices(is))[linear_order], eachindex(linear_order))
   sites = only.(collect(vertex_data(is)))[linear_order]

   # test with next-to-nearest-neighbor Ising Hamiltonian
@@ -139,9 +141,9 @@ end
   @testset "Svd approach" for root_vertex in leaf_vertices(is)
     # get TTN Hamiltonian directly
-    Hsvd = ttn(H, is; root_vertex=root_vertex, cutoff=1e-10)
+    Hsvd = ttn(H, is; root_vertex, cutoff=1e-10)
     # get corresponding MPO Hamiltonian
-    Hline = ITensorMPS.MPO(relabel_sites(H, vmap), sites)
+    Hline = ITensorMPS.MPO(replace_vertices(v -> vmap[v], H), sites)

     # compare resulting sparse Hamiltonians
     @disable_warn_order begin
@@ -150,9 +152,8 @@ end
     end
     @test Tttno ≈ Tmpo rtol = 1e-6

-    # this breaks for longer range interactions ###not anymore
-    Hsvd_lr = ttn(Hlr, is; root_vertex=root_vertex, algorithm="svd", cutoff=1e-10)
-    Hline_lr = ITensorMPS.MPO(relabel_sites(Hlr, vmap), sites)
+    Hsvd_lr = ttn(Hlr, is; root_vertex, cutoff=1e-10)
+    Hline_lr = ITensorMPS.MPO(replace_vertices(v -> vmap[v], Hlr), sites)
     @disable_warn_order begin
       Tttno_lr = prod(Hline_lr)
       Tmpo_lr = contract(Hsvd_lr)
@@ -183,13 +184,13 @@ end
   @testset "Svd approach" for root_vertex in leaf_vertices(is)
     # get TTN Hamiltonian directly
-    Hsvd = ttn(H, is; root_vertex=root_vertex, cutoff=1e-10)
+    Hsvd = ttn(H, is; root_vertex, cutoff=1e-10)
     # get corresponding MPO Hamiltonian
     sites = [only(is[v]) for v in reverse(post_order_dfs_vertices(c, root_vertex))]
     vmap = Dictionary(reverse(post_order_dfs_vertices(c, root_vertex)), 1:length(sites))
-    Hline = ITensorMPS.MPO(relabel_sites(H, vmap), sites)
+    Hline = ITensorMPS.MPO(replace_vertices(v -> vmap[v], H), sites)
     # compare resulting sparse Hamiltonians
-    Hmat_sp = ITensorGaussianMPS.hopping_hamiltonian(relabel_sites(H, vmap))
+    Hmat_sp = ITensorGaussianMPS.hopping_hamiltonian(replace_vertices(v -> vmap[v], H))
     @disable_warn_order begin
       Tmpo = prod(Hline)
       Tttno = contract(Hsvd)
@@ -200,7 +201,8 @@ end
     @test norm(Tttno) > 0
     @test norm(Tmpo) ≈ norm(Tttno) rtol = 1e-6

-    @test_broken Tmpo ≈ Tttno # ToDo fix comparison for fermionic tensors
+    # TODO: fix comparison for fermionic tensors
+    @test_broken Tmpo ≈ Tttno
     # In the meantime: matricize tensors and convert to dense Matrix to compare element by element
     dTmm = to_matrix(Tmpo)
     dTtm = to_matrix(Tttno)
@@ -235,14 +237,14 @@ end
   # linearized version
   linear_order = [4, 1, 2, 5, 3, 6]
-  vmap = Dictionary(vertices(is)[linear_order], 1:length(linear_order))
+  vmap = Dictionary(collect(vertices(is))[linear_order], eachindex(linear_order))
   sites = only.(filter(d -> !isempty(d), collect(vertex_data(is_missing_site))))[linear_order]

   J1 = -1
   J2 = 2
   h = 0.5
   # connectivity of the Hamiltonian is that of the original comb graph
-  H = ModelHamiltonians.heisenberg(c; J1=J1, J2=J2, h=h)
+  H = ModelHamiltonians.heisenberg(c; J1, J2, h)

   # add combination of longer range interactions
   Hlr = copy(H)
@@ -253,9 +255,9 @@ end
   @testset "Svd approach" for root_vertex in leaf_vertices(is)
     # get TTN Hamiltonian directly
-    Hsvd = ttn(H, is_missing_site; root_vertex=root_vertex, cutoff=1e-10)
+    Hsvd = ttn(H, is_missing_site; root_vertex, cutoff=1e-10)
     # get corresponding MPO Hamiltonian
-    Hline = ITensorMPS.MPO(relabel_sites(H, vmap), sites)
+    Hline = ITensorMPS.MPO(replace_vertices(v -> vmap[v], H), sites)
     # compare resulting sparse Hamiltonians
     @disable_warn_order begin
@@ -264,10 +266,8 @@ end
     end
     @test Tttno ≈ Tmpo rtol = 1e-6

-    Hsvd_lr = ttn(
-      Hlr, is_missing_site; root_vertex=root_vertex, algorithm="svd", cutoff=1e-10
-    )
-    Hline_lr = ITensorMPS.MPO(relabel_sites(Hlr, vmap), sites)
+    Hsvd_lr = ttn(Hlr, is_missing_site; root_vertex, cutoff=1e-10)
+    Hline_lr = ITensorMPS.MPO(replace_vertices(v -> vmap[v], Hlr), sites)
     @disable_warn_order begin
       Tttno_lr = prod(Hline_lr)
       Tmpo_lr = contract(Hsvd_lr)
diff --git a/test/test_sitetype.jl b/test/test_sitetype.jl
index 443298dd..77075d8d 100644
--- a/test/test_sitetype.jl
+++ b/test/test_sitetype.jl
@@ -5,7 +5,7 @@ using Graphs: nv, vertices
 using ITensorNetworks: IndsNetwork, siteinds
 using ITensors: SiteType, hastags, space
 using ITensors.NDTensors: dim
-using NamedGraphs: named_grid
+using NamedGraphs.NamedGraphGenerators: named_grid
 using Test: @test, @testset

 @testset "Site ind system" begin
diff --git a/test/test_tebd.jl b/test/test_tebd.jl
index fe7185f1..5b7f6278 100644
--- a/test/test_tebd.jl
+++ b/test/test_tebd.jl
@@ -2,10 +2,11 @@
 using Graphs: vertices
 using ITensors: ITensors
 using ITensors.ITensorMPS: ITensorMPS
-using ITensorNetworks:
-  ITensorNetwork, cartesian_to_linear, dmrg, expect, group_terms, siteinds, tebd
+using ITensorNetworks: ITensorNetwork, cartesian_to_linear, dmrg, expect, siteinds, tebd
+using ITensorNetworks.ITensorsExtensions: group_terms
 using ITensorNetworks.ModelHamiltonians: ModelHamiltonians
-using NamedGraphs: named_grid, rename_vertices
+using NamedGraphs.GraphsExtensions: rename_vertices
+using NamedGraphs.NamedGraphGenerators: named_grid
 using Test: @test, @testset

 ITensors.disable_warn_order()
@@ -22,7 +23,7 @@ ITensors.disable_warn_order()
   #
   # DMRG comparison
   #
-  g_dmrg = rename_vertices(g, cartesian_to_linear(dims))
+  g_dmrg = rename_vertices(v -> cartesian_to_linear(dims)[v], g)
   ℋ_dmrg = ModelHamiltonians.ising(g_dmrg; h)
   s_dmrg = [only(s[v]) for v in vertices(s)]
   H_dmrg = ITensorMPS.MPO(ℋ_dmrg, s_dmrg)
diff --git a/test/test_treetensornetworks/test_expect.jl b/test/test_treetensornetworks/test_expect.jl
index d8eb365c..3dc5c1b1 100644
--- a/test/test_treetensornetworks/test_expect.jl
+++ b/test/test_treetensornetworks/test_expect.jl
@@ -3,7 +3,7 @@ using Graphs: vertices
 using ITensors.ITensorMPS: MPS
 using ITensorNetworks: ttn, expect, random_mps, siteinds
 using LinearAlgebra: norm
-using NamedGraphs: named_comb_tree
+using NamedGraphs.NamedGraphGenerators: named_comb_tree
 using Test: @test, @testset

 @testset "MPS expect comparison with ITensors" begin
diff --git a/test/test_treetensornetworks/test_position.jl b/test/test_treetensornetworks/test_position.jl
index 92988ce7..6e5fdd44 100644
--- a/test/test_treetensornetworks/test_position.jl
+++ b/test/test_treetensornetworks/test_position.jl
@@ -3,8 +3,8 @@ using Graphs: vertices
 using ITensors: ITensors
 using ITensorNetworks: ProjTTN, ttn, environments, position, siteinds
 using ITensorNetworks.ModelHamiltonians: ModelHamiltonians
-using NamedGraphs: named_comb_tree
-using Test
+using NamedGraphs.NamedGraphGenerators: named_comb_tree
+using Test: @test, @testset

 @testset "ProjTTN position" begin
   # make a nontrivial TTN state and TTN operator
@@ -34,7 +34,7 @@ using Test
   psi = ttn(states, s)

   # actual test, verifies that position is out of place
-  vs = vertices(s)
+  vs = collect(vertices(s))
   PH = ProjTTN(H)
   PH = position(PH, psi, [vs[2]])
   original_keys = deepcopy(keys(environments(PH)))
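The `relabel_sites(H, vmap)` calls above become `replace_vertices(v -> vmap[v], H)`, and `rename_vertices` likewise now takes a function in place of a dictionary-like map. A sketch of both patterns, with a hypothetical `vmap` like the ones built in these tests:

```julia
using Dictionaries: Dictionary
using Graphs: vertices
using ITensorNetworks: OpSum
using ITensorNetworks.ITensorsExtensions: replace_vertices
using NamedGraphs.GraphsExtensions: rename_vertices
using NamedGraphs.NamedGraphGenerators: named_comb_tree

c = named_comb_tree((2, 2))
vmap = Dictionary(collect(vertices(c)), 1:4)  # tuple vertices -> linear sites

os = OpSum()
os += "Sz", (1, 1)
os_linear = replace_vertices(v -> vmap[v], os)  # site (1, 1) relabeled to vmap[(1, 1)]

c_linear = rename_vertices(v -> vmap[v], c)     # same functional style for graphs
```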
diff --git a/test/test_treetensornetworks/test_solvers/test_contract.jl b/test/test_treetensornetworks/test_solvers/test_contract.jl
index f21a558e..29d238f0 100644
--- a/test/test_treetensornetworks/test_solvers/test_contract.jl
+++ b/test/test_treetensornetworks/test_solvers/test_contract.jl
@@ -19,7 +19,7 @@ using ITensorNetworks.ModelHamiltonians: ModelHamiltonians
 using ITensors: prime, replaceinds, replaceprime
 using ITensors.ITensorMPS: ITensorMPS
 using LinearAlgebra: norm, normalize
-using NamedGraphs: named_comb_tree
+using NamedGraphs.NamedGraphGenerators: named_comb_tree
 using Test: @test, @test_broken, @testset

 @testset "Contract MPO" begin
@@ -118,7 +118,7 @@ end
   # Test basic usage for multiple ProjOuterProdTTN with default parameters
   # BLAS.axpy-like test
   os_id = OpSum()
-  os_id += -1, "Id", vertices(s)[1], "Id", vertices(s)[1]
+  os_id += -1, "Id", first(vertices(s)), "Id", first(vertices(s))
   minus_identity = ttn(os_id, s)
   Hpsi = ITensorNetworks.sum_apply(
     [(H, psi), (minus_identity, psi)]; alg="fit", init=psi, nsweeps=1
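Many of the remaining hunks only switch to Julia's implicit keyword-argument shorthand, writing `ttn(H, is; root_vertex, cutoff=1e-10)` for `ttn(H, is; root_vertex=root_vertex, cutoff=1e-10)`. The two forms are equivalent whenever a local variable shares the keyword's name; a generic illustration (standard Julia, nothing package-specific):

```julia
# `f(x; kw)` passes the local variable `kw` as the keyword argument `kw`.
f(x; root_vertex, cutoff=0.0) = (x, root_vertex, cutoff)

root_vertex = (1, 1)
f(2; root_vertex=root_vertex) == f(2; root_vertex)  # true
```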
"Up" : "Dn"; linkdims=20) e2, psi2 = dmrg(Hline, psiline; nsweeps, maxdim, cutoff, outputlevel=0) diff --git a/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl b/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl index 74fb2b2b..adfe936b 100644 --- a/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl +++ b/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl @@ -6,7 +6,7 @@ using ITensorNetworks: using ITensorNetworks.ModelHamiltonians: ModelHamiltonians using ITensors: @disable_warn_order, array, dag, onehot, uniqueind using LinearAlgebra: eigen, normalize -using NamedGraphs: named_comb_tree +using NamedGraphs.NamedGraphGenerators: named_comb_tree using Random: Random using Test: @test, @testset diff --git a/test/test_treetensornetworks/test_solvers/test_tdvp.jl b/test/test_treetensornetworks/test_solvers/test_tdvp.jl index 126e70e0..f07251e8 100644 --- a/test/test_treetensornetworks/test_solvers/test_tdvp.jl +++ b/test/test_treetensornetworks/test_solvers/test_tdvp.jl @@ -16,7 +16,7 @@ using ITensorNetworks: tdvp using ITensorNetworks.ModelHamiltonians: ModelHamiltonians using LinearAlgebra: norm -using NamedGraphs: named_binary_tree, named_comb_tree +using NamedGraphs.NamedGraphGenerators: named_binary_tree, named_comb_tree using Observers: observer using Test: @testset, @test diff --git a/test/test_treetensornetworks/test_solvers/test_tdvp_time_dependent.jl b/test/test_treetensornetworks/test_solvers/test_tdvp_time_dependent.jl index 343d4e0d..17f1cc71 100644 --- a/test/test_treetensornetworks/test_solvers/test_tdvp_time_dependent.jl +++ b/test/test_treetensornetworks/test_solvers/test_tdvp_time_dependent.jl @@ -5,7 +5,8 @@ using ITensorNetworks.ModelHamiltonians: ModelHamiltonians using OrdinaryDiffEq: Tsit5 using KrylovKit: exponentiate using LinearAlgebra: norm -using NamedGraphs: AbstractNamedEdge, named_comb_tree +using NamedGraphs: AbstractNamedEdge +using NamedGraphs.NamedGraphGenerators: named_comb_tree using Test: @test, @test_broken, @testset include( diff --git a/test/test_ttno.jl b/test/test_ttno.jl index 79c25175..95118ba1 100644 --- a/test/test_ttno.jl +++ b/test/test_ttno.jl @@ -3,7 +3,7 @@ using Graphs: vertices using ITensorNetworks: ttn, contract, ortho_region, siteinds, union_all_inds using ITensors: @disable_warn_order, prime, randomITensor using LinearAlgebra: norm -using NamedGraphs: named_comb_tree +using NamedGraphs.NamedGraphGenerators: named_comb_tree using Random: shuffle using Test: @test, @testset @@ -18,7 +18,7 @@ using Test: @test, @testset # operator site inds is_isp = union_all_inds(is, prime(is; links=[])) # specify random linear vertex ordering of graph vertices - vertex_order = shuffle(vertices(c)) + vertex_order = shuffle(collect(vertices(c))) @testset "Construct TTN operator from ITensor or Array" begin cutoff = 1e-10 diff --git a/test/test_ttns.jl b/test/test_ttns.jl index c9ae7344..0a06e6e8 100644 --- a/test/test_ttns.jl +++ b/test/test_ttns.jl @@ -4,7 +4,7 @@ using Graphs: vertices using ITensorNetworks: ttn, contract, ortho_region, siteinds using ITensors: @disable_warn_order, randomITensor using LinearAlgebra: norm -using NamedGraphs: named_comb_tree +using NamedGraphs.NamedGraphGenerators: named_comb_tree using Random: shuffle using Test: @test, @testset @@ -17,7 +17,7 @@ using Test: @test, @testset dmap = v -> rand(1:3) is = siteinds(dmap, c) # specify random linear vertex ordering of graph vertices - vertex_order = shuffle(vertices(c)) + vertex_order = shuffle(collect(vertices(c))) @testset "Construct TTN from 
ITensor or Array" begin cutoff = 1e-10