Advanced Computing Platform for Theoretical Physics

Commit a144be5b authored by Lei Wang's avatar Lei Wang
Browse files

even better encapsulation; inner loop optimize A (boundary MPS) , outer loop...

even better encapsulation; inner loop optimize A (boundary MPS) , outer loop optimize T (boundary PEPS)
parent fc796aba
...@@ -38,7 +38,7 @@ def vmps(T, d, D, no_iter, Nepochs=5): ...@@ -38,7 +38,7 @@ def vmps(T, d, D, no_iter, Nepochs=5):
#print ('einsum', time.time()- t0) #print ('einsum', time.time()- t0)
#print ((B-B.t()).abs().sum(), (C-C.t()).abs().sum()) #print ((B-B.t()).abs().sum(), (C-C.t()).abs().sum())
#t0 = time.time() #t0 = time.time()
loss = mpsrg(A, T) # -lnZ loss = mpsrg(A, T.detach()) # loss = -lnZ , here we optimize over A
#print ('mpsrg', time.time()- t0) #print ('mpsrg', time.time()- t0)
print (' loss', loss.item()) print (' loss', loss.item())
#t0 = time.time() #t0 = time.time()
...@@ -50,7 +50,7 @@ def vmps(T, d, D, no_iter, Nepochs=5): ...@@ -50,7 +50,7 @@ def vmps(T, d, D, no_iter, Nepochs=5):
loss = optimizer.step(closure) loss = optimizer.step(closure)
print (' epoch, free energy', epoch, loss.item()) print (' epoch, free energy', epoch, loss.item())
return loss, A return -mpsrg(A.detach(), T), None # pass lnZ out, we need to optimize over T
if __name__=='__main__': if __name__=='__main__':
import time import time
...@@ -75,9 +75,8 @@ if __name__=='__main__': ...@@ -75,9 +75,8 @@ if __name__=='__main__':
T = torch.einsum('ai,aj,ak,al->ijkl', (M, M, M, M)) T = torch.einsum('ai,aj,ak,al->ijkl', (M, M, M, M))
#optimization #optimization
_, A = vmps(T.detach(), 2, args.Dcut, args.Niter, args.Nepochs) lnZ, _ = vmps(T, 2, args.Dcut, args.Niter, args.Nepochs)
#recompute lnZ using optimized A #recompute lnZ using optimized A
lnZ = -mpsrg(A.detach(), T)
dlnZ = torch.autograd.grad(lnZ, K, create_graph=True)[0] # En = -d lnZ / d beta dlnZ = torch.autograd.grad(lnZ, K, create_graph=True)[0] # En = -d lnZ / d beta
print (-dlnZ.item()) print (-dlnZ.item())
#second order derivative evaluated in this way seems not correct, no effect of environment #second order derivative evaluated in this way seems not correct, no effect of environment
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment