Advanced Computing Platform for Theoretical Physics

Committing large files makes the server unstable. Please commit only code; do not commit large files.

Commit 143e0e03 authored by Lei Wang

Add the option to use Lanczos to diagonalize the transfer matrix

parent e0459b6a
@@ -22,18 +22,21 @@ if __name__=='__main__':
parser.add_argument("-Niter", type=int, default=20, help="Niter")
parser.add_argument("-float32", action='store_true', help="use float32")
parser.add_argument("-lanczos", action='store_true', help="lanczos")
parser.add_argument("-cuda", type=int, default=-1, help="use GPU")
args = parser.parse_args()
device = torch.device("cpu" if args.cuda<0 else "cuda:"+str(args.cuda))
dtype = torch.float32 if args.float32 else torch.float64
+if args.lanczos: print ('use lanczos')
d = 2 # fixed
D = args.D
Dcut = args.Dcut
Niter = args.Niter
-B = 0.01* torch.randn(d, D, D, D, D, dtype=dtype, device=device)
+B = 0.1* torch.randn(d, D, D, D, D, dtype=dtype, device=device)
#symmetrize initial boundary PEPS
B = (B + B.permute(0, 4, 2, 3, 1))/2.
B = (B + B.permute(0, 1, 3, 2, 4))/2.
@@ -42,8 +45,8 @@ if __name__=='__main__':
A = torch.nn.Parameter(B.view(d, D**4))
#boundary MPS
-A1 = torch.nn.Parameter(0.01*torch.randn(Dcut, D**2*d, Dcut, dtype=dtype, device=device))
-A2 = torch.nn.Parameter(0.01*torch.randn(Dcut, D**2, Dcut, dtype=dtype, device=device))
+A1 = torch.nn.Parameter(0.1*torch.randn(Dcut, D**2*d, Dcut, dtype=dtype, device=device))
+A2 = torch.nn.Parameter(0.1*torch.randn(Dcut, D**2, Dcut, dtype=dtype, device=device))
#dimer covering
T = torch.zeros(d, d, d, d, d, d, dtype=dtype, device=device)
@@ -65,8 +68,8 @@ if __name__=='__main__':
#double layer
T2 = (A.t()@A).view(D, D, D, D, D, D, D, D).permute(0,4, 1,5, 2,6, 3,7).contiguous().view(D**2, D**2, D**2, D**2)
t0=time.time()
-lnT = contraction(T1, D**2*d, Dcut, Niter, A1)
-lnZ = contraction(T2, D**2, Dcut, Niter, A2)
+lnT = contraction(T1, D**2*d, Dcut, Niter, A1, use_lanczos=args.lanczos)
+lnZ = contraction(T2, D**2, Dcut, Niter, A2, use_lanczos=args.lanczos)
loss = (-lnT + lnZ)
print (' contraction done {:.3f}s'.format(time.time()-t0))
print (' total loss', loss.item())
......
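The second file imports `lanczos` from a local module that is not included in this commit. For reference, a minimal symmetric Lanczos routine consistent with the call sites below, `lanczos(matvec, phi0, 100)` returning Ritz values whose largest magnitude approximates the dominant eigenvalue, might look like this sketch; the implementation details are assumptions, not the repository's actual module:

import torch

def lanczos(matvec, v0, steps):
    # Hypothetical sketch of the local lanczos module (assumed interface):
    # matvec(x) applies a symmetric matrix to the vector x, v0 is a
    # normalized starting vector, steps caps the Krylov dimension.
    alphas, betas = [], []
    vs = [v0]
    v_prev, v, beta = torch.zeros_like(v0), v0, 0.0
    for _ in range(steps):
        w = matvec(v)
        alpha = torch.dot(w, v)
        alphas.append(alpha)
        w = w - alpha*v - beta*v_prev
        for u in vs:                 # full reorthogonalization for stability
            w = w - torch.dot(w, u)*u
        beta = w.norm()
        if beta < 1e-12:             # Krylov space exhausted, stop early
            break
        betas.append(beta)
        v_prev, v = v, w/beta
        vs.append(v)
    # Diagonalize the tridiagonal projection; its extremal eigenvalues
    # (Ritz values) converge to the extremal eigenvalues of the matrix.
    k = len(alphas)
    tri = torch.diag(torch.stack(alphas))
    for i in range(k-1):
        tri[i, i+1] = tri[i+1, i] = betas[i]
    # torch.symeig matches the PyTorch vintage used in this diff
    # (torch.linalg.eigvalsh in newer releases)
    w, _ = torch.symeig(tri, eigenvectors=True)
    return w

All operations stay inside torch, so the backward pass used by the optimizer (and by torch.autograd.grad on lnZ) can differentiate through the recursion; mpsrg below only consumes w.abs().max(), the dominant eigenvalue of the transfer matrix.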
import time
import torch
torch.set_num_threads(4)
+from lanczos import lanczos
-def mpsrg(A, T):
+def mpsrg(A, T, use_lanczos=False):
Asymm = (A + A.permute(2, 1, 0))*0.5
D, d = Asymm.shape[0], Asymm.shape[1]
#t0 = time.time()
B = torch.einsum('ldr,adcb,icj->lairbj', (Asymm, T, Asymm)).contiguous().view(D**2*d, D**2*d)
C = torch.einsum('ldr,idj->lirj', (Asymm, Asymm)).contiguous().view(D**2, D**2)
-w, _ = torch.symeig(B, eigenvectors=True)
-#phi0 = Asymm.view(D**2*d)
-#phi0 = phi0/phi0.norm()
-#w = lanczos(lambda x: torch.mv(B,x), phi0, 100)
+if use_lanczos:
+    phi0 = Asymm.view(D**2*d)
+    phi0 = phi0/phi0.norm()
+    w = lanczos(lambda x: torch.mv(B,x), phi0, 100)
+else:
+    w, _ = torch.symeig(B, eigenvectors=True)
lnZ1 = torch.log(w.abs().max())
-w, _ = torch.symeig(C, eigenvectors=True)
-#phi0 = Asymm.sum(1).view(D**2)
-#phi0 = phi0/phi0.norm()
-#w = lanczos(lambda x: torch.mv(C,x), phi0, 100)
+if use_lanczos:
+    phi0 = Asymm.sum(1).view(D**2)
+    phi0 = phi0/phi0.norm()
+    w = lanczos(lambda x: torch.mv(C,x), phi0, 100)
+else:
+    w, _ = torch.symeig(C, eigenvectors=True)
lnZ2 = torch.log(w.abs().max())
#lnZ1 = 0.0
@@ -36,7 +42,7 @@ def mpsrg(A, T):
# C = torch.mm(C, C)
return -lnZ1 + lnZ2
-def vmps(T, d, D, Nepochs=50, Ainit=None):
+def vmps(T, d, D, Nepochs=50, Ainit=None, use_lanczos=False):
#symmetrize
T = (T + T.permute(3, 1, 2, 0))/2. #left-right
@@ -53,7 +59,7 @@ def vmps(T, d, D, Nepochs=50, Ainit=None):
#print ('einsum', time.time()- t0)
#print ((B-B.t()).abs().sum(), (C-C.t()).abs().sum())
t0 = time.time()
-loss = mpsrg(A, T.detach()) # loss = -lnZ , here we optimize over A
+loss = mpsrg(A, T.detach(), use_lanczos) # loss = -lnZ , here we optimize over A
print ('mpsrg', time.time()- t0)
print (' loss', loss.item())
t0 = time.time()
@@ -65,10 +71,9 @@ def vmps(T, d, D, Nepochs=50, Ainit=None):
loss = optimizer.step(closure)
#print (' epoch, free energy', epoch, loss.item())
-return -mpsrg(A.detach(), T) # pass lnZ out, we need to optimize over T
+return -mpsrg(A.detach(), T, use_lanczos) # pass lnZ out, we need to optimize over T
if __name__=='__main__':
-import time
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument("-D", type=int, default=2, help="D")
@@ -76,6 +81,7 @@ if __name__=='__main__':
parser.add_argument("-beta", type=float, default=0.44, help="beta")
parser.add_argument("-Nepochs", type=int, default=100, help="Nepochs")
parser.add_argument("-float32", action='store_true', help="use float32")
parser.add_argument("-lanczos", action='store_true', help="lanczos")
parser.add_argument("-cuda", type=int, default=-1, help="use GPU")
args = parser.parse_args()
device = torch.device("cpu" if args.cuda<0 else "cuda:"+str(args.cuda))
@@ -89,7 +95,7 @@ if __name__=='__main__':
T = torch.einsum('ai,aj,ak,al->ijkl', (M, M, M, M))
#optimization
-lnZ, _ = vmps(T, 2, args.Dcut, args.Nepochs)
+lnZ, _ = vmps(T, 2, args.Dcut, Nepochs=args.Nepochs, use_lanczos=args.lanczos)
#recompute lnZ using optimized A
dlnZ = torch.autograd.grad(lnZ, K, create_graph=True)[0] # En = -d lnZ / d beta
print (-dlnZ.item())
......
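As a quick sanity check of the new flag, one can compare the Lanczos estimate of the dominant eigenvalue against full diagonalization on a small random symmetric matrix. This snippet relies on the hypothetical lanczos sketch above and is not part of the commit:

import torch
torch.manual_seed(0)

n = 100
M = torch.randn(n, n, dtype=torch.float64)
M = (M + M.t())/2.                          # symmetrize, as mpsrg assumes
v0 = torch.randn(n, dtype=torch.float64)
v0 = v0/v0.norm()
w_lan = lanczos(lambda x: torch.mv(M, x), v0, 100)
w_full, _ = torch.symeig(M, eigenvectors=True)
# the two dominant magnitudes should agree to near machine precision
print(w_lan.abs().max().item(), w_full.abs().max().item())

With 100 iterations the Krylov space here is complete, so the agreement is exact up to roundoff; for the (D**2*d)-dimensional transfer matrices in the diff, the same iteration count costs only matrix-vector products instead of a dense diagonalization.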