Advanced Computing Platform for Theoretical Physics

Commit 3394330f authored by Lei Wang

for taurg, it seems sweeping is better than bfgs + svd, although starting at random does not reach the optimum obtained with hotrg init
parent 62f2db72
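
The "sweep" mentioned in the commit message refers to updating one isometry at a time instead of moving all of them jointly with LBFGS. Below is a minimal sketch of such an update against the Ising module that follows; the helper name sweep, the use of torch.autograd.grad to obtain each environment, and the polar-factor update rule are illustrative assumptions, not code from this commit:

def sweep(model, T, nsweeps=1):
    # visit each isometry in turn and replace it by the polar factor of its
    # environment, i.e. the isometry that maximizes the linearization of lnZ
    for _ in range(nsweeps):
        for step in range(model.niter):
            lnZ = model.forward(T)
            E = torch.autograd.grad(lnZ, model.params[step])[0]
            W, S, V = torch.svd(E)           # E = W S V^T
            model.set_isometry(step, W @ V.t())
    return model.forward(T)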
import time
import numpy as np
import torch
torch.manual_seed(42)
torch.set_num_threads(1)
from utils import gauge, trace
from fixgaugeqr import fixgauge_qr
from adlib import SVD
svd = SVD.apply

def renormalize(T, U):
    # contract two copies of T along the up-down direction and compress the
    # doubled left/right legs with the isometry U
    return torch.einsum('axob,amz,moyn,bnw->zxyw', (T, U, T, U))
class Ising(torch.nn.Module):
    def __init__(self, chi, niter, dtype=torch.float64, device='cpu', use_checkpoint=False):
        super(Ising, self).__init__()
        self.D = 2
        self.chi = chi # cutoff
        self.niter = niter
        self.dtype = dtype
        self.device = device
        self.params = torch.nn.ParameterList()
        for step in range(niter):
            #same as chi = min(self.D**(2**step), self.chi), but avoid computing large int
            if 2**step < np.log(1.*self.chi) / np.log(1.*self.D):
                chi = self.D**(2**step)
            else:
                chi = self.chi
            chi_new = min(chi**2, self.chi)
            self.params.append(torch.nn.Parameter(torch.randn(chi**2, chi_new, dtype=dtype, device=device)))

    def get_isometry(self, step):
        # QR parametrization
        #Q, _ = fixgauge_qr(self.params[step])
        #return Q
        # SVD parametrization
        U, S, V = svd(self.params[step])
        return U@V.t()
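    # The SVD parametrization above returns the orthogonal polar factor of
    # params[step]: for W = U S V^T, the product U V^T is the isometry
    # closest to W in Frobenius norm, so the unconstrained parameter matrix
    # is always mapped onto the isometry manifold.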
    def set_isometry(self, step, U):
        self.params[step].data = U
    def init_isometry(self, T):
        '''
        initialize isometry with hotrg
        '''
        lnZ = 0.0
        for step in range(self.niter):
            f = T.abs().max()
            lnZ += 2**(-step)*torch.log(f)
            T = T / f
            # contract up-down; pick the left or right projector with the
            # smaller truncation error
            vl, el = gauge(T, self.chi, 'l')
            vr, er = gauge(T, self.chi, 'r')
            U = vl if (el < er) else vr
            self.set_isometry(step, U.view(-1, U.shape[-1]))
            T = renormalize(T, U)
        # build 1D transfer matrix by tracing the up-down legs
        A = torch.zeros(T.shape[0], T.shape[3], dtype=T.dtype, device=T.device)
        for y in range(T.shape[1]):
            A = A + T[:, y, y, :]
        A = (A + A.t())/2
        w, v = torch.symeig(A, eigenvectors=True)
        lnZ = lnZ + torch.logsumexp(2**self.niter*torch.log(w.abs()), dim=0)/4**self.niter
        return lnZ
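    # lnZ bookkeeping (used by both init_isometry and forward): a step-n
    # tensor covers 2^n sites, and the final 2^niter x 2^niter lattice of
    # 4^niter sites contains 4^niter/2^n of them, so the factor f pulled out
    # at step n contributes 2^(-n)*log(f) to lnZ per site; the last term is
    # log Tr A^(2^niter) / 4^niter from the 1D transfer matrix A.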
    def forward(self, T):
        lnZ = 0.0
        for n in range(self.niter):
            f = T.abs().max()
            lnZ += 2**(-n)*torch.log(f)
            T = T / f
            U = self.get_isometry(n) # isometry for this step
            chi = int(np.sqrt(U.shape[0]))
            U = U.view(chi, chi, -1)
            T = renormalize(T, U)
        #build 1D transfer matrix
        A = torch.zeros(T.shape[0], T.shape[3], dtype=T.dtype, device=T.device)
        for y in range(T.shape[1]):
            A = A + T[:, y, y, :]
        A = (A + A.t())/2
        w, v = torch.symeig(A, eigenvectors=True)
        lnZ = lnZ + torch.logsumexp(2**self.niter*torch.log(w.abs()), dim=0)/4**self.niter
        return lnZ
if __name__=='__main__':
    import argparse
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("-beta", type=float, default=0.44068679350977147, help="beta")
    parser.add_argument("-chi", type=int, default=16, help="chi")
    parser.add_argument("-niter", type=int, default=4, help="niter")
    parser.add_argument("-nsweeps", type=int, default=50, help="nsweeps")
    parser.add_argument("-init", default='random', choices=['random', 'hotrg'], help="initial isometry")
    parser.add_argument("-exact", type=float, default=-0.9296960086092727, help="exact free energy")
    parser.add_argument("-float32", action='store_true', help="use float32")
    parser.add_argument("-cuda", type=int, default=-1, help="GPU #")
    args = parser.parse_args()

    device = torch.device("cpu" if args.cuda<0 else "cuda:"+str(args.cuda))
    dtype = torch.float32 if args.float32 else torch.float64
    print ('use', dtype)

    # initial tensor of the 2D Ising model
    K = torch.tensor([args.beta], dtype=dtype, device=device)
    c = torch.sqrt(torch.cosh(K)/2.)
    s = torch.sqrt(torch.sinh(K)/2.)
    M = torch.stack([torch.cat([c+s, c-s]), torch.cat([c-s, c+s])])
    T = torch.einsum('ai,aj,ak,al->ijkl', (M, M, M, M))

    model = Ising(args.chi, args.niter, dtype=dtype, device=device)
    if args.init == 'hotrg':
        with torch.no_grad():
            loss = -model.init_isometry(T)
        if args.exact is not None:
            print ('initial loss with hotrg:', loss.item(), (loss.item()-args.exact)/np.abs(args.exact))
        else:
            print ('initial loss with hotrg:', loss.item())
    else:
        pass # keep the random initialization from __init__

    optimizer = torch.optim.LBFGS(model.parameters(), max_iter=10, tolerance_grad=0, tolerance_change=0, line_search_fn='strong_wolfe')
    params = list(model.parameters())
    params = list(filter(lambda p: p.requires_grad, params))
    nparams = sum([np.prod(p.size()) for p in params])
    print ('total number of trainable parameters:', nparams)

    def closure():
        optimizer.zero_grad()
        loss = -model.forward(T) # loss = -lnZ
        loss.backward()
        print (loss.item())
        return loss

    for epoch in range(args.nsweeps):
        loss = optimizer.step(closure)
        if args.exact is not None:
            print (epoch, loss.item(), (loss.item()-args.exact)/np.abs(args.exact))
        else:
            print (epoch, loss.item())
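
A typical invocation, assuming the script is saved as taurg.py (the filename is not shown on this page):

    python taurg.py -chi 16 -niter 4 -init hotrg -nsweeps 50

With -exact left at its default, each LBFGS epoch prints the loss -lnZ together with its relative error against the exact free energy.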
@@ -28,7 +28,9 @@ class Ising(torch.nn.Module):
                 chi = self.chi
             chi_new = min(chi**2, self.chi)
-            self.params.append(torch.nn.Parameter(torch.randn(chi, chi, chi_new, dtype=dtype, device=device)))
+            X = torch.randn(chi*chi, chi_new, dtype=dtype, device=device)
+            Q, _ = torch.qr(X)
+            self.params.append(torch.nn.Parameter(Q.view(chi, chi, chi_new)))

     def get_isometry(self, step):
         return self.params[step]