Advanced Computing Platform for Theoretical Physics

commit大文件会使得服务器变得不稳定,请大家尽量只commit代码,不要commit大的文件。

Commit c734f9b7 authored by Lei Wang's avatar Lei Wang
Browse files

we SHOULD differentiate through the 2D contraction

parent 2b604e71
......@@ -5,12 +5,7 @@ svd = SVD.apply
def ctmrg(T, d, Dcut, max_iter):
#symmetrize
T = (T + T.permute(3, 1, 2, 0))/2.
T = (T + T.permute(0, 2, 1, 3))/2.
T = (T + T.permute(2, 3, 0, 1))/2.
T = (T + T.permute(1, 0, 3, 2))/2.
lnZ = 0.0
truncation_error = 0.0
#C = torch.randn(d, d, dtype=T.dtype, device=T.device) #T.sum((0,1))
......@@ -57,7 +52,10 @@ def ctmrg(T, d, Dcut, max_iter):
if (diff < 1E-8):
break
sold = s
print ('ctmrg iterations', n)
#print ('ctmrg iterations', n)
#C = C.detach()
#E = E.detach()
Z1 = torch.einsum('ab,bcd,fd,gha,hcij,fjk,lg,mil,mk', (C,E,C,E,T,E,C,E,C))
#CEC = torch.einsum('da,ebd,ce->abc', (C,E,C)).view(1, D**2*d)
#ETE = torch.einsum('abc,lbdr,mdn->almcrn',(E,T,E)).contiguous().view(D**2*d, D**2*d)
......
......@@ -12,6 +12,15 @@ torch.manual_seed(42)
from ctmrg import ctmrg as contraction
#from vmps import vmps as contraction
def symmetrize(A):
    """Project the boundary PEPS tensor onto its lattice-symmetric subspace.

    The input is viewed as a rank-5 tensor (d, D, D, D, D) — one physical
    leg followed by four virtual legs — and averaged over the four virtual-leg
    permutations below, then flattened back to (d, D**4) and scaled to unit
    Frobenius norm.  Relies on the module-level globals ``d`` and ``D``.
    """
    sym = A.view(d, D, D, D, D)
    # Average sequentially over each symmetry permutation of the virtual legs
    # (same order as the original inline code, so the result is identical).
    for perm in ((0, 4, 2, 3, 1),
                 (0, 1, 3, 2, 4),
                 (0, 3, 4, 1, 2),
                 (0, 2, 1, 4, 3)):
        sym = 0.5 * (sym + sym.permute(*perm))
    sym = sym.view(d, D**4)
    return sym / sym.norm()
if __name__=='__main__':
import time
import argparse
......@@ -37,11 +46,7 @@ if __name__=='__main__':
B = 0.01* torch.randn(d, D, D, D, D, dtype=dtype, device=device)
#symmetrize initial boundary PEPS
B = (B + B.permute(0, 4, 2, 3, 1))/2.
B = (B + B.permute(0, 1, 3, 2, 4))/2.
B = (B + B.permute(0, 3, 4, 1, 2))/2.
B = (B + B.permute(0, 2, 1, 4, 3))/2.
A = torch.nn.Parameter(B.view(d, D**4))
A = torch.nn.Parameter(symmetrize(B).view(d, D**4))
#boundary MPS
#A1 = torch.nn.Parameter(0.01*torch.randn(Dcut, D**2*d, Dcut, dtype=dtype, device=device))
......@@ -57,28 +62,29 @@ if __name__=='__main__':
# Build the model tensor T and the optimizer over the variational parameter A.
# NOTE(review): presumably T encodes the local Boltzmann/interaction tensor of
# the 2D model — confirm against the (unseen) lines that allocate T above.
T[1, 0, 0, 0, 0, 0] = 1.0
T = T.view(d, d**4, d)
# NOTE(review): the next two lines are the old/new sides of a diff hunk; only
# the second (with tolerance_grad=0, which disables the gradient-norm stopping
# criterion) is present in the committed file.
optimizer = torch.optim.LBFGS([A], max_iter=20)
optimizer = torch.optim.LBFGS([A], max_iter=20, tolerance_grad=0)
#optimizer = torch.optim.Adam([A])
def closure():
    # LBFGS closure: recompute the loss (a free-energy difference) and its
    # gradient from scratch on every optimizer step.
    # NOTE(review): this block is a stripped diff — several lines appear twice
    # (old version first, new version second); in the committed file only the
    # second of each pair survives.
    optimizer.zero_grad()
    # Symmetrize INSIDE the closure so gradients flow through the projection
    # (this is the point of the commit: differentiate through the contraction).
    As = symmetrize(A)
    # Single-layer transfer tensor <A|T|A>, reshaped to a 4-leg tensor of
    # bond dimension D**2*d.  Old line (uses raw A) then new line (uses As):
    T1 = torch.einsum('xa,xby,yc' , (A,T,A)).view(D,D,D,D, d,d,d,d, D,D,D,D).permute(0,4,8, 1,5,9, 2,6,10, 3,7,11).contiguous().view(D**2*d, D**2*d, D**2*d, D**2*d)
    T1 = torch.einsum('xa,xby,yc' , (As,T,As)).view(D,D,D,D, d,d,d,d, D,D,D,D).permute(0,4,8, 1,5,9, 2,6,10, 3,7,11).contiguous().view(D**2*d, D**2*d, D**2*d, D**2*d)
    #double layer
    # Norm tensor <A|A> with bond dimension D**2.  Old line then new line:
    T2 = (A.t()@A).view(D, D, D, D, D, D, D, D).permute(0,4, 1,5, 2,6, 3,7).contiguous().view(D**2, D**2, D**2, D**2)
    T2 = (As.t()@As).view(D, D, D, D, D, D, D, D).permute(0,4, 1,5, 2,6, 3,7).contiguous().view(D**2, D**2, D**2, D**2)
    t0=time.time()
    #lnT = contraction(T1, D**2*d, Dcut, Niter, A1, lanczos_steps=args.lanczos_steps)
    #lnZ = contraction(T2, D**2, Dcut, Niter, A2, lanczos_steps=args.lanczos_steps)
    # Contract both networks with CTMRG; each returns (log-partition-function
    # per site, truncation error).
    lnT, error1 = contraction(T1, D**2*d, Dcut, Niter)
    lnZ, error2 = contraction(T2, D**2, Dcut, Niter)
    # Loss = -(lnT - lnZ): maximizing the normalized overlap lnT - lnZ.
    loss = (-lnT + lnZ)
    # Old (active) prints, then their new commented-out replacements:
    print (' contraction done {:.3f}s'.format(time.time()-t0))
    print (' total loss', loss.item())
    print (' loss, error', loss.item(), error1.item(), error2.item())
    #print (' contraction done {:.3f}s'.format(time.time()-t0))
    #print (' total loss', loss.item())
    #print (' loss, error', loss.item(), error1.item(), error2.item())
    t0=time.time()
    # Backpropagate through the full CTMRG contraction.
    loss.backward()
    print (' backward done {:.3f}s'.format(time.time()-t0))
    #print (' backward done {:.3f}s'.format(time.time()-t0))
    return loss
for epoch in range(100):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment