Advanced Computing Platform for Theoretical Physics
Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
Lei Wang
dTRG
Commits
e28bad7f
Commit
e28bad7f
authored
Dec 10, 2018
by
Lei Wang
Browse files
symmetrize tensors
parent
43862c5f
Changes
2
Hide whitespace changes
Inline
Side-by-side
ctmrg.py
View file @
e28bad7f
import
torch
from
itertools
import
permutations
def ctmrg(Tin, d, Dcut, no_iter):
    """Corner transfer matrix renormalization group for a 4-leg tensor.

    Contracts the 2D tensor network built from `Tin` and returns the
    log-partition-function estimate accumulated over the iterations.

    Parameters
    ----------
    Tin : torch.Tensor
        Rank-4 tensor of shape (d, d, d, d) defining the local weight.
    d : int
        Bond dimension of each leg of `Tin`.
    Dcut : int
        Maximum boundary bond dimension kept after truncation.
    no_iter : int
        Number of CTMRG iterations.

    Returns
    -------
    (lnZ, truncation_error) : (torch.Tensor or float, float)
        Accumulated log partition function (a 0-dim tensor once at least
        one iteration has run, else 0.0) and the accumulated relative
        singular-value weight discarded by the truncations.
    """
    # Symmetrize the input over all 4! = 24 leg permutations so that
    # C and E can be kept symmetric throughout the iteration.
    T = torch.zeros_like(Tin)
    for perm in permutations([0, 1, 2, 3], 4):
        T = T + Tin.permute(perm)
    T = T / 24.

    lnZ = 0.0
    truncation_error = 0.0

    # Initial corner (D, D) and edge (D, d, D) tensors obtained by
    # tracing out legs of the symmetrized T; initially D == d.
    C = T.sum((0, 1))
    E = T.sum(1)
    D = d

    for _ in range(no_iter):
        # Normalize to avoid overflow/underflow; the scale factors cancel
        # in the ratio lnZ += ln Z1 + ln Z3 - 2 ln Z2 computed below.
        C = C / C.max()
        E = E / E.max()

        # Grow the corner: contract C with two edges and T, then fuse the
        # paired legs into a (D*d, D*d) matrix.
        A = torch.einsum('ab,eca,bdg,cdfh->efgh', C, E, E, T) \
                 .contiguous().view(D * d, D * d)

        D_new = min(D * d, Dcut)

        # torch.linalg.svd replaces the deprecated torch.svd; only the
        # left singular vectors are needed for the projector.
        U, S, _ = torch.linalg.svd(A)

        # Relative singular-value weight thrown away by the truncation.
        s_total = S.sum()
        if s_total > 0:
            truncation_error += (S[D_new:].sum() / s_total).item()

        P = U[:, :D_new]          # projection isometry (D*d, D_new)
        C = P.t() @ A @ P         # renormalized corner, (D_new, D_new)
        C = (C + C.t()) / 2.      # re-symmetrize against numerical noise

        # Grow the edge with one T, fuse legs, then project both
        # boundary legs with P.
        ET = torch.einsum('ldr,adbc->labrc', E, T) \
                  .contiguous().view(D * d, d, D * d)
        E = torch.einsum('li,ldr,rj->idj', P, ET, P)   # (D_new, d, D_new)
        E = (E + E.permute(2, 1, 0)) / 2.              # re-symmetrize

        D = D_new

        C = C / C.max()
        E = E / E.max()

        # Z1: 3x3 patch with one T; Z2: 2x3 patch; Z3: 2x2 corner-only
        # patch. Their ratio yields the per-site free energy increment.
        Z1 = torch.einsum('ab,bcd,fd,gha,hcij,fjk,lg,mil,mk',
                          C, E, C, E, T, E, C, E, C)
        Z3 = torch.einsum('ab,bc,cd,da', C, C, C, C)
        Z2 = torch.einsum('ab,bcd,de,fa,gcf,ge', C, E, C, C, E, C)

        print(Z1.item(), Z2.item(), Z3.item())
        # abs() guards against Z's crossing zero from numerical noise.
        lnZ += torch.log(Z1.abs()) + torch.log(Z3.abs()) \
             - 2. * torch.log(Z2.abs())

    return lnZ, truncation_error
...
...
dimer_covering.py
View file @
e28bad7f
...
...
@@ -11,6 +11,7 @@ torch.manual_seed(42)
#from hotrg2 import hotrg as contraction
#from trg import levin_nave_trg as contraction
from
ctmrg
import
ctmrg
as
contraction
from
itertools
import
permutations
if
__name__
==
'__main__'
:
import
time
...
...
@@ -32,7 +33,14 @@ if __name__=='__main__':
Dcut
=
args
.
Dcut
Niter
=
args
.
Niter
A
=
torch
.
nn
.
Parameter
(
0.01
*
torch
.
randn
(
d
,
D
**
4
,
dtype
=
dtype
,
device
=
device
,
requires_grad
=
True
))
B
=
0.01
*
torch
.
randn
(
d
,
D
,
D
,
D
,
D
,
dtype
=
dtype
,
device
=
device
)
#symmetrize
A
=
torch
.
zeros_like
(
B
)
for
i
in
permutations
([
1
,
2
,
3
,
4
],
4
):
A
=
A
+
B
.
permute
([
0
]
+
list
(
i
))
/
24.
A
=
A
.
view
(
d
,
D
**
4
)
A
=
torch
.
nn
.
Parameter
(
A
)
#dimer covering
T
=
torch
.
zeros
(
d
,
d
,
d
,
d
,
d
,
d
,
dtype
=
dtype
,
device
=
device
)
T
[
0
,
0
,
0
,
0
,
0
,
1
]
=
1.0
...
...
@@ -43,8 +51,8 @@ if __name__=='__main__':
T
[
1
,
0
,
0
,
0
,
0
,
0
]
=
1.0
T
=
T
.
view
(
d
,
d
**
4
,
d
)
#
optimizer = torch.optim.LBFGS([A], max_iter=
1
0)
optimizer
=
torch
.
optim
.
Adam
([
A
])
optimizer
=
torch
.
optim
.
LBFGS
([
A
],
max_iter
=
2
0
)
#
optimizer = torch.optim.Adam([A])
def
closure
():
optimizer
.
zero_grad
()
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment