Advanced Computing Platform for Theoretical Physics

Commit 306844bb authored by Lei Wang

fix

parent 5868058b
@@ -25,7 +25,7 @@ if __name__=='__main__':
     device = torch.device("cpu" if args.cuda<0 else "cuda:"+str(args.cuda))
     dtype = torch.float32 if args.float32 else torch.float64
-    if args.lanczos>0: print ('lanczos steps', args.lanczos_steps)
+    if args.lanczos_steps>0: print ('lanczos steps', args.lanczos_steps)
     d = 2 # fixed
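For orientation, here is a minimal sketch of the argparse flags this block appears to rely on. Only the attribute names (args.cuda, args.float32, args.lanczos_steps) come from the hunk above; the types, defaults, and help strings are assumptions, not part of this commit.

import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", type=int, default=-1, help="GPU index; a negative value selects the CPU")  # assumed default
parser.add_argument("--float32", action="store_true", help="use single precision instead of float64")
parser.add_argument("--lanczos_steps", type=int, default=0, help="number of Lanczos steps; 0 disables Lanczos")
args = parser.parse_args()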
@@ -39,7 +39,7 @@ def mpsrg(A, T, lanczos_steps=0):
     return -lnZ1 + lnZ2
-def vmps(T, d, D, Nepochs=50, Ainit=None, use_lanczos=False):
+def vmps(T, d, D, Nepochs=50, Ainit=None, lanczos_steps=0):
     #symmetrize
     T = (T + T.permute(3, 1, 2, 0))/2. #left-right
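The signature change replaces the boolean use_lanczos with an integer lanczos_steps, so the caller now specifies how many Lanczos iterations to run instead of merely switching the method on. As a rough, self-contained illustration of what such a step count controls (not the implementation inside mpsrg, which this diff does not show), a k-step Lanczos run approximates the dominant eigenvalue of a symmetric matrix:

import torch

def dominant_eigval_lanczos(M, k):
    # Illustrative helper: largest eigenvalue of a symmetric matrix M after k Lanczos steps.
    n = M.shape[0]
    v = torch.randn(n, dtype=M.dtype)
    v = v / v.norm()
    v_prev = torch.zeros_like(v)
    beta = torch.zeros((), dtype=M.dtype)
    alphas, betas = [], []
    for _ in range(k):
        w = M @ v - beta * v_prev          # three-term recurrence
        alpha = torch.dot(w, v)
        w = w - alpha * v
        alphas.append(alpha)
        beta = w.norm()
        if beta < 1e-12:                   # invariant subspace reached, stop early
            break
        betas.append(beta)
        v_prev, v = v, w / beta
    m = len(alphas)
    Tm = torch.diag(torch.stack(alphas))   # small tridiagonal matrix in the Krylov basis
    for i in range(m - 1):
        Tm[i, i + 1] = Tm[i + 1, i] = betas[i]
    return torch.linalg.eigvalsh(Tm)[-1]

M = torch.randn(64, 64, dtype=torch.float64)
M = (M + M.t()) / 2
print(dominant_eigval_lanczos(M, 30), torch.linalg.eigvalsh(M)[-1])  # the two values should agree closely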
@@ -56,7 +56,7 @@ def vmps(T, d, D, Nepochs=50, Ainit=None, use_lanczos=False):
         #print ('einsum', time.time()- t0)
         #print ((B-B.t()).abs().sum(), (C-C.t()).abs().sum())
         #t0 = time.time()
-        loss = mpsrg(A, T.detach(), use_lanczos) # loss = -lnZ , here we optimize over A
+        loss = mpsrg(A, T.detach(), lanczos_steps) # loss = -lnZ , here we optimize over A
         #print ('mpsrg', time.time()- t0)
         #print (' loss', loss.item())
         #t0 = time.time()
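The call above passes T.detach(), so during this inner optimization the gradient of the loss flows into A only and the tensor T is treated as a fixed input. A tiny self-contained example of that detach pattern (variable names are only illustrative):

import torch
A = torch.randn(3, requires_grad=True)
T = torch.randn(3, requires_grad=True)
loss = (A * T.detach()).sum()
loss.backward()
print(A.grad is not None, T.grad is None)   # prints: True True  (A receives a gradient, T does not)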
@@ -68,7 +68,7 @@ def vmps(T, d, D, Nepochs=50, Ainit=None, use_lanczos=False):
         loss = optimizer.step(closure)
         print (' epoch, loss', epoch, loss.item())
-    return -mpsrg(A.detach(), T, use_lanczos) # pass lnZ out, we need to optimize over T
+    return -mpsrg(A.detach(), T, lanczos_steps) # pass lnZ out, we need to optimize over T
 if __name__=='__main__':
     import argparse
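Conversely, the returned value detaches A, so the lnZ that vmps hands back is differentiable with respect to T alone, and the caller can take gradients of lnZ through T. A sketch of such downstream use (the call site and variable names are assumptions, not shown in this commit):

T.requires_grad_(True)
lnZ = vmps(T, d, D, Nepochs=50, lanczos_steps=args.lanczos_steps)
dlnZ_dT, = torch.autograd.grad(lnZ, T)      # gradient of the log partition function w.r.t. T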