Lei Wang / dTRG / Commits

Commit 5868058b, authored Dec 14, 2018 by Lei Wang

provide lanczos_steps

parent a255e94e

Changes: 2 files
dimer_covering.py
@@ -19,13 +19,13 @@ if __name__=='__main__':
     parser.add_argument("-Niter", type=int, default=10, help="Niter")
     parser.add_argument("-float32", action='store_true', help="use float32")
-    parser.add_argument("-lanczos", action='store_true', help="lanczos")
+    parser.add_argument("-lanczos_steps", type=int, default=0, help="lanczos steps")
     parser.add_argument("-cuda", type=int, default=-1, help="use GPU")
     args = parser.parse_args()
     device = torch.device("cpu" if args.cuda < 0 else "cuda:" + str(args.cuda))
     dtype = torch.float32 if args.float32 else torch.float64
-    if args.lanczos: print('use lanczos')
+    if args.lanczos_steps > 0: print('lanczos steps', args.lanczos_steps)
     d = 2  # fixed
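The new flag turns the number of Lanczos iterations into a command-line choice instead of a boolean. A hypothetical invocation, with flag names taken from the diff above (the default lanczos_steps=0 presumably keeps the dense-diagonalization path, mirroring the mpsrg change in vmps.py below):

    # 30 Lanczos steps per eigensolve, on CPU
    python dimer_covering.py -Niter 10 -lanczos_steps 30 -cuda -1
    # default lanczos_steps=0: dense diagonalization instead
    python dimer_covering.py -Niter 10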
@@ -65,8 +65,8 @@ if __name__=='__main__':
     #double layer
     T2 = (A.t() @ A).view(D, D, D, D, D, D, D, D).permute(0, 4, 1, 5, 2, 6, 3, 7).contiguous().view(D**2, D**2, D**2, D**2)
     t0 = time.time()
-    lnT = contraction(T1, D**2*d, Dcut, Niter, A1, use_lanczos=args.lanczos)
-    lnZ = contraction(T2, D**2, Dcut, Niter, A2, use_lanczos=args.lanczos)
+    lnT = contraction(T1, D**2*d, Dcut, Niter, A1, lanczos_steps=args.lanczos_steps)
+    lnZ = contraction(T2, D**2, Dcut, Niter, A2, lanczos_steps=args.lanczos_steps)
     loss = (-lnT + lnZ)
     print(' contraction done {:.3f}s'.format(time.time() - t0))
     print(' total loss', loss.item())
vmps.py
@@ -4,13 +4,13 @@ torch.set_num_threads(4)
 from lanczos import lanczos

-def mpsrg(A, T, use_lanczos=False):
+def mpsrg(A, T, lanczos_steps=0):
     Asymm = (A + A.permute(2, 1, 0)) * 0.5
     D, d = Asymm.shape[0], Asymm.shape[1]
     #t0 = time.time()
-    if use_lanczos:
+    if lanczos_steps > 0:
         #phi0 = Asymm.view(D**2*d)
         phi0 = torch.randn(D**2*d, dtype=T.dtype, device=T.device)
         phi0 = phi0 / phi0.norm()
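The integer argument now doubles as the switch that use_lanczos used to provide: zero selects the dense torch.symeig branch, any positive value selects the matrix-free Lanczos branch with that many iterations. A minimal sketch of the two call styles, assuming A is the usual (D, d, D) MPS tensor and T the (d, d, d, d) transfer tensor:

    loss = mpsrg(A, T, lanczos_steps=0)   # dense branch: exact torch.symeig
    loss = mpsrg(A, T, lanczos_steps=30)  # iterative branch: 30 Lanczos steps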
@@ -18,20 +18,20 @@ def mpsrg(A, T, use_lanczos=False):
             Tx = (T.view(-1, d) @ x.view(D, d, D).permute(1, 0, 2).contiguous().view(d, -1)).view(d, d, d, D, D).permute(1, 3, 0, 2, 4).contiguous()
             return ((Asymm.view(D, d*D) @ Tx.view(d*D, d*d*D)).view(D*d, d*D) @ Asymm.permute(1, 2, 0).contiguous().view(d*D, D)).view(D**2*d)
-        w = lanczos(Hopt, phi0, 100)
+        w = lanczos(Hopt, phi0, lanczos_steps)
     else:
         B = torch.einsum('ldr,adcb,icj->lairbj', (Asymm, T, Asymm)).contiguous().view(D**2*d, D**2*d)
         w, _ = torch.symeig(B, eigenvectors=True)
     lnZ1 = torch.log(w.abs().max())
-    if use_lanczos:
+    if lanczos_steps > 0:
         #phi0 = Asymm.sum(1).view(D**2)
         phi0 = torch.randn(D**2, dtype=T.dtype, device=T.device)
         phi0 = phi0 / phi0.norm()
         def Hopt(x):
             x = x.view(D, D)
             return ((Asymm.view(D*d, D) @ x).view(D, d*D) @ Asymm.permute(1, 2, 0).contiguous().view(d*D, D)).view(D**2)
-        w = lanczos(Hopt, phi0, 100)
+        w = lanczos(Hopt, phi0, lanczos_steps)
     else:
         C = torch.einsum('ldr,idj->lirj', (Asymm, Asymm)).contiguous().view(D**2, D**2)
         w, _ = torch.symeig(C, eigenvectors=True)
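The lanczos routine imported at the top of vmps.py is not part of this commit, so its interface can only be inferred from the call sites: w = lanczos(Hopt, phi0, n), with w later consumed as w.abs().max(). A minimal sketch of a compatible matrix-free Lanczos eigensolver follows; it is an assumption about that interface, not the repository's implementation:

    import torch

    def lanczos(Hopt, phi0, k):
        # Plain Lanczos for a symmetric operator given only as a
        # matrix-vector product x -> Hopt(x): build a k-step tridiagonal
        # matrix and return its eigenvalues (Ritz values), whose extremes
        # approximate the extreme eigenvalues of the full operator.
        alphas, betas = [], []
        v_prev = torch.zeros_like(phi0)
        v = phi0 / phi0.norm()
        beta = torch.zeros((), dtype=phi0.dtype, device=phi0.device)
        for _ in range(k):
            w = Hopt(v)
            alpha = torch.dot(w, v)
            w = w - alpha * v - beta * v_prev
            alphas.append(alpha)
            beta = w.norm()
            if beta < 1e-12:  # hit an invariant subspace; stop early
                break
            betas.append(beta)
            v_prev, v = v, w / beta
        Tm = torch.diag(torch.stack(alphas))
        if len(alphas) > 1:
            off = torch.stack(betas)[:len(alphas) - 1]
            Tm = Tm + torch.diag(off, 1) + torch.diag(off, -1)
        w, _ = torch.symeig(Tm, eigenvectors=False)  # symeig, as elsewhere in this file
        return w

The sketch omits reorthogonalization, which matters once the step count grows; for the small counts wired in here it is usually adequate.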
@@ -55,11 +55,11 @@ def vmps(T, d, D, Nepochs=50, Ainit=None, use_lanczos=False):
         optimizer.zero_grad()
         #print ('einsum', time.time()- t0)
         #print ((B-B.t()).abs().sum(), (C-C.t()).abs().sum())
-        t0 = time.time()
-        loss = mpsrg(A, T.detach(), use_lanczos)  # loss = -lnZ, here we optimize over A
+        #t0 = time.time()
+        loss = mpsrg(A, T.detach(), lanczos_steps)  # loss = -lnZ, here we optimize over A
         #print ('mpsrg', time.time()- t0)
         #print (' loss', loss.item())
-        t0 = time.time()
+        #t0 = time.time()
         loss.backward(retain_graph=False)
         #print ('backward', time.time()- t0)
         return loss
@@ -78,7 +78,7 @@ if __name__=='__main__':
parser
.
add_argument
(
"-beta"
,
type
=
float
,
default
=
0.44
,
help
=
"beta"
)
parser
.
add_argument
(
"-Nepochs"
,
type
=
int
,
default
=
100
,
help
=
"Nepochs"
)
parser
.
add_argument
(
"-float32"
,
action
=
'store_true'
,
help
=
"use float32"
)
parser
.
add_argument
(
"-lanczos
"
,
action
=
'store_true'
,
help
=
"lanczos"
)
parser
.
add_argument
(
"-lanczos
_steps"
,
type
=
int
,
default
=
0
,
help
=
"lanczos
steps
"
)
parser
.
add_argument
(
"-cuda"
,
type
=
int
,
default
=-
1
,
help
=
"use GPU"
)
args
=
parser
.
parse_args
()
device
=
torch
.
device
(
"cpu"
if
args
.
cuda
<
0
else
"cuda:"
+
str
(
args
.
cuda
))
@@ -92,7 +92,7 @@ if __name__=='__main__':
     T = torch.einsum('ai,aj,ak,al->ijkl', (M, M, M, M))
     #optimization
-    lnZ = vmps(T, 2, args.Dcut, Nepochs=args.Nepochs, use_lanczos=args.lanczos)
+    lnZ = vmps(T, 2, args.Dcut, Nepochs=args.Nepochs, lanczos_steps=args.lanczos_steps)
     #recompute lnZ using optimized A
     dlnZ = torch.autograd.grad(lnZ, K, create_graph=True)[0]
     # En = -d lnZ / d beta
     print(-dlnZ.item())
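The closing lines obtain the energy from automatic differentiation of the optimized free energy, En = -d lnZ / d beta, rather than from a separate measurement. The same autograd pattern in a self-contained toy where the derivative has a closed form:

    import torch

    # single spin in a field: Z = 2 cosh(K), so d lnZ / dK = tanh(K)
    K = torch.tensor(0.44, dtype=torch.float64, requires_grad=True)
    lnZ = torch.log(2 * torch.cosh(K))
    dlnZ = torch.autograd.grad(lnZ, K)[0]
    print(-dlnZ.item())  # energy-style estimate: -tanh(0.44)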