
Commit a255e94e authored by Lei Wang

start lanczos from randn
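
This commit changes how the Lanczos solver in mpsrg is seeded: instead of reshaping the current tensor Asymm into the start vector, both call sites now draw a torch.randn vector (matching T's dtype and device) and normalize it. It also silences the per-epoch loss print in vmps and updates the vmps call in __main__ to take a single return value.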

parent 35a00557
@@ -11,7 +11,8 @@ def mpsrg(A, T, use_lanczos=False):
     #t0 = time.time()
     if use_lanczos:
-        phi0 = Asymm.view(D**2*d)
+        #phi0 = Asymm.view(D**2*d)
+        phi0 = torch.randn(D**2*d, dtype=T.dtype, device=T.device)
         phi0 = phi0/phi0.norm()
         def Hopt(x):
             Tx = (T.view(-1, d) @ x.view(D, d, D).permute(1, 0, 2).contiguous().view(d,-1)).view(d,d,d,D,D).permute(1,3,0,2,4).contiguous()
@@ -24,7 +25,8 @@ def mpsrg(A, T, use_lanczos=False):
     lnZ1 = torch.log(w.abs().max())
     if use_lanczos:
-        phi0 = Asymm.sum(1).view(D**2)
+        #phi0 = Asymm.sum(1).view(D**2)
+        phi0 = torch.randn(D**2, dtype=T.dtype, device=T.device)
         phi0 = phi0/phi0.norm()
         def Hopt(x):
             x = x.view(D, D)
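
Both hunks above swap a start vector built from the current tensor Asymm for a normalized random one, so the iteration no longer starts from a guess tied to the variational state. The repository's own Lanczos routine that consumes Hopt and phi0 is not shown in this diff; the sketch below is a minimal matrix-free Lanczos in the same spirit, where the lanczos helper and the toy symmetric operator are illustrative assumptions, not code from this repo.

import torch

def lanczos(matvec, phi0, k=30):
    # k-step Lanczos tridiagonalization of a symmetric operator given
    # only through its matrix-vector product. Returns the tridiagonal
    # matrix Tk and the orthonormal Lanczos basis Q (as columns).
    alphas, betas, Q = [], [], []
    q_prev = torch.zeros_like(phi0)
    q = phi0 / phi0.norm()
    beta = 0.0
    for _ in range(k):
        Q.append(q)
        w = matvec(q) - beta * q_prev
        alpha = torch.dot(w, q)
        w = w - alpha * q
        alphas.append(alpha)
        beta = w.norm()
        if beta < 1e-12:              # hit an invariant subspace; stop early
            break
        q_prev, q = q, w / beta
        betas.append(beta)
    Tk = torch.diag(torch.stack(alphas))
    if betas:
        off = torch.stack(betas)[:len(alphas) - 1]
        Tk = Tk + torch.diag(off, 1) + torch.diag(off, -1)
    return Tk, torch.stack(Q, dim=1)

# Seed from randn and normalize, exactly as the commit does
# (D, d and the operator are made up for this toy run).
D, d = 4, 2
torch.manual_seed(0)
H = torch.randn(D**2 * d, D**2 * d, dtype=torch.float64)
H = (H + H.t()) / 2                   # toy symmetric stand-in for Hopt
phi0 = torch.randn(D**2 * d, dtype=H.dtype, device=H.device)
phi0 = phi0 / phi0.norm()
Tk, Q = lanczos(lambda x: H @ x, phi0)
w, v = torch.linalg.eigh(Tk)
psi = Q @ v[:, -1]                    # Ritz vector for the largest eigenvalue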
@@ -56,7 +58,7 @@ def vmps(T, d, D, Nepochs=50, Ainit=None, use_lanczos=False):
         t0 = time.time()
         loss = mpsrg(A, T.detach(), use_lanczos) # loss = -lnZ, here we optimize over A
         #print ('mpsrg', time.time()- t0)
-        print (' loss', loss.item())
+        #print (' loss', loss.item())
         t0 = time.time()
         loss.backward(retain_graph=False)
         #print ('backward', time.time()- t0)
@@ -90,7 +92,7 @@ if __name__=='__main__':
     T = torch.einsum('ai,aj,ak,al->ijkl', (M, M, M, M))
     #optimization
-    lnZ, _ = vmps(T, 2, args.Dcut, Nepochs=args.Nepochs, use_lanczos=args.lanczos)
+    lnZ = vmps(T, 2, args.Dcut, Nepochs=args.Nepochs, use_lanczos=args.lanczos)
     #recompute lnZ using optimized A
     dlnZ = torch.autograd.grad(lnZ, K, create_graph=True)[0] # En = -d lnZ / d beta
     print (-dlnZ.item())
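
The last context lines show how the energy is recovered: lnZ is differentiated with respect to the coupling K through autograd, and create_graph=True keeps the graph alive so higher derivatives remain available. A self-contained toy version of the same pattern, using a single Ising bond with partition function Z = 2 cosh(K) (my example, not from this file):

import torch

K = torch.tensor(0.4, dtype=torch.float64, requires_grad=True)
lnZ = torch.log(2 * torch.cosh(K))                   # toy lnZ(K)
dlnZ = torch.autograd.grad(lnZ, K, create_graph=True)[0]
print(-dlnZ.item())                                  # energy: -tanh(K)
# Because create_graph=True, a second derivative (a specific-heat-like
# quantity) can be taken through the same graph:
d2lnZ = torch.autograd.grad(dlnZ, K)[0]
print(d2lnZ.item())                                  # 1 - tanh(K)**2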