Advanced Computing Platform for Theoretical Physics

Commit e98b40e7 authored by Lei Wang

iterative variational calculation

parent 246dca5e
@@ -10,8 +10,8 @@ torch.manual_seed(42)
 #from hotrg2 import hotrg as contraction
 #from trg import levin_nave_trg as contraction
-from ctmrg import ctmrg as contraction
-from itertools import permutations
+#from ctmrg import ctmrg as contraction
+from vmps import vmps as contraction
 if __name__=='__main__':
     import time
@@ -66,7 +66,8 @@ if __name__=='__main__':
     lnZ, error2 = contraction(T2, D**2, Dcut, Niter)
     loss = (-lnT + lnZ)
     print (' contraction done {:.3f}s'.format(time.time()-t0))
-    print (' loss, error', loss.item(), error1.item(), error2.item())
+    print (' total loss', loss.item())
+    #print (' loss, error', loss.item(), error1.item(), error2.item())
     t0=time.time()
     loss.backward()
...
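Context for the driver change above: the contraction back-end is swapped from CTMRG to the variational MPS routine, called through the same interface. A minimal sketch of that calling pattern, assuming the (T, d, Dcut, Niter) -> (lnZ, error) interface visible in the diff (the wrapper function and the tensors T1, T2 here are illustrative, not from the repository):

import torch
from vmps import vmps as contraction  # the back-end selected by this commit

def free_energy(T1, T2, D, Dcut, Niter):
    # each back-end returns (lnZ, error); vmps now returns error=None
    lnT, _ = contraction(T1, D**2, Dcut, Niter)
    lnZ, _ = contraction(T2, D**2, Dcut, Niter)
    # the driver minimizes this loss and calls loss.backward() for gradients
    return -lnT + lnZ

vmps returning the pair (-loss, None) is what lets the driver unpack it exactly like the other contraction back-ends.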
 import torch
 torch.set_num_threads(4)
-def vmps(T, d, D, no_iter, Nepochs):
-    A = torch.nn.Parameter(0.01*torch.randn(D, d, D, dtype=torch.float64, device=device))
+def vmps(T, d, D, no_iter, Nepochs=5):
+    A = torch.nn.Parameter(0.01*torch.randn(D, d, D, dtype=T.dtype, device=T.device))
     def mpsrg(B, C):
         lnZ1 = 0.0
@@ -23,7 +23,7 @@ def vmps(T, d, D, no_iter, Nepochs):
         #print (torch.log(torch.trace(B))/2**no_iter, torch.log(torch.trace(C))/2**no_iter)
         return lnZ1, lnZ2
-    optimizer = torch.optim.LBFGS([A], max_iter=20)
+    optimizer = torch.optim.LBFGS([A], max_iter=10)
     def closure():
         optimizer.zero_grad()
         Asymm = (A + A.permute(2, 1, 0))*0.5
@@ -37,18 +37,18 @@ def vmps(T, d, D, no_iter, Nepochs):
         #print ('mpsrg', time.time()- t0)
         loss = -lnZ1 + lnZ2
         print (' loss', loss.item(), lnZ1.item(), lnZ2.item())
         #t0 = time.time()
-        loss.backward()
+        loss.backward(retain_graph=True)
         #print ('backward', time.time()- t0)
         return loss
     for epoch in range(Nepochs):
         loss = optimizer.step(closure)
-        print ('epoch, free energy', epoch, loss.item())
+        print (' epoch, free energy', epoch, loss.item())
-    return -loss
+    return -loss, None
 if __name__=='__main__':
     import time
@@ -71,4 +71,4 @@ if __name__=='__main__':
     M = torch.stack([torch.cat([c, s]), torch.cat([c, -s])])
     T = torch.einsum('ai,aj,ak,al->ijkl', (M, M, M, M))
-    lnZ = vmps(T, 2, args.Dcut, args.Niter, args.Nepochs)
+    vmps(T, 2, args.Dcut, args.Niter, args.Nepochs)
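The __main__ block above assembles the rank-4 site tensor of the 2D Ising model by contracting four copies of M, a square root of the 2x2 Boltzmann matrix. A self-contained sketch of that construction (the inverse-temperature value K is an assumed placeholder; c, s, M, and T follow the diff):

import torch

K = torch.tensor([0.44], dtype=torch.float64)  # assumed inverse temperature beta
c = torch.sqrt(torch.cosh(K))
s = torch.sqrt(torch.sinh(K))
# M @ M.t() reproduces the Boltzmann weights exp(K*s_i*s_j) for s_i, s_j = +/-1
M = torch.stack([torch.cat([c, s]), torch.cat([c, -s])])
# summing over the shared spin index a yields the rank-4 site tensor T_ijkl
T = torch.einsum('ai,aj,ak,al->ijkl', M, M, M, M)
print(T.shape)  # torch.Size([2, 2, 2, 2])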
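The optimization loop added in vmps is the standard PyTorch LBFGS closure pattern: LBFGS may evaluate the closure several times per step (line searches), so the loss and gradients are rebuilt inside it. A self-contained sketch of that pattern with a toy scalar objective standing in for mpsrg (the objective and the tensor W are assumptions; the symmetrization and loop structure follow the diff):

import torch

D, d = 10, 2
A = torch.nn.Parameter(0.01*torch.randn(D, d, D, dtype=torch.float64))
W = torch.rand(d, d, dtype=torch.float64)  # fixed stand-in tensor, not from the repo

optimizer = torch.optim.LBFGS([A], max_iter=10)

def closure():
    optimizer.zero_grad()
    # impose the reflection symmetry used in vmps: average A with its transpose
    Asymm = (A + A.permute(2, 1, 0))*0.5
    # toy scalar objective in place of -lnZ1 + lnZ2 from mpsrg
    loss = torch.einsum('aib,ajb,ij->', Asymm, Asymm, W).abs().log()
    # the commit passes retain_graph=True, presumably because mpsrg shares graph
    # parts across closure evaluations; this toy graph is rebuilt on each call
    loss.backward()
    return loss

for epoch in range(5):
    loss = optimizer.step(closure)
    print(' epoch, loss', epoch, loss.item())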