Advanced Computing Platform for Theoretical Physics

Committing large files makes the server unstable. Please commit only code and avoid committing large files.

Commit 9c9dc1b8 authored by Lei Wang

trying to accelerate the CTMRG iteration with accelerated fixed-point solvers; does not seem to help
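The idea under test: a single CTMRG move is a fixed-point map x ↦ f(x), so instead of plainly iterating x ← f(x) one can hand the residual f(x) − x to an accelerated root finder. Below is a minimal, illustrative sketch of that pattern with SciPy's Anderson mixing (the same optimize.root call used at the bottom of the diff); the map g is only a toy stand-in for the real step(T, x, chi).

import numpy as np
from scipy import optimize

def g(x):
    # toy fixed-point map standing in for one CTMRG step
    return np.tanh(2.0*x)

# Anderson mixing solves the residual equation g(x) - x = 0,
# often in fewer evaluations than the plain iteration x <- g(x)
sol = optimize.root(lambda x: g(x) - x, np.ones(4), method='anderson',
                    options={'fatol': 1E-10})
print(sol.x)  # close to the nonzero fixed point of tanh(2x)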

parent a813c8cb
import torch


def step(T, x, chi):
    d = T.shape[0]
    # unpack the flattened vector x into the corner C and edge E tensors
    C = torch.as_tensor(x[:chi**2], dtype=T.dtype, device=T.device).view(chi, chi)
    E = torch.as_tensor(x[chi**2:], dtype=T.dtype, device=T.device).view(chi, d, chi)
    D_new = min(d*chi, chi)

    # step 1: construct the density matrix Rho
    Rho = torch.tensordot(C, E, ([1], [0]))          # C(ef)*EU(fga)=Rho(ega)
    Rho = torch.tensordot(Rho, E, ([0], [0]))        # Rho(ega)*EL(ehc)=Rho(gahc)
    Rho = torch.tensordot(Rho, T, ([0, 2], [0, 1]))  # Rho(gahc)*T(ghdb)=Rho(acdb)
    Rho = Rho.permute(0, 3, 1, 2).contiguous().view(d*chi, d*chi)  # Rho(acdb)->Rho(ab;cd)
    Rho = Rho + Rho.t()
    Rho = Rho/Rho.norm()

    # step 2: get the isometry P
    #U, S, V = torch.svd(Rho)
    #truncation_error = S[D_new:].sum()/S.sum()
    #P = U[:, :D_new]  # projection operator
    # can also do symeig since Rho is symmetric (torch.linalg.eigh in newer PyTorch)
    S, U = torch.symeig(Rho, eigenvectors=True)
    sorted_S, indices = torch.sort(S.abs(), descending=True)
    truncation_error = sorted_S[D_new:].sum()/sorted_S.sum()
    S = S[indices][:D_new]
    P = U[:, indices][:, :D_new]  # projection operator

    # step 3: renormalize C and E
    C = P.t() @ Rho @ P  # C(D_new, D_new)
    ## EL(u,r,d)
    P = P.view(chi, d, D_new)
    E = torch.tensordot(E, P, ([0], [0]))        # EL(def)P(dga)=E(efga)
    E = torch.tensordot(E, T, ([0, 2], [1, 0]))  # E(efga)T(gehb)=E(fahb)
    E = torch.tensordot(E, P, ([0, 2], [0, 1]))  # E(fahb)P(fhc)=E(abc)

    # step 4: symmetrize C and E
    C = 0.5*(C + C.t())
    # fix the gauge of the E tensor with a sign convention
    s = E[:, 1, 0].sign()
    E = (s[:, None]*E.view(chi, -1)).view(-1, chi)*s
    E = E.view(chi, d, chi)
    E = 0.5*(E + E.permute(2, 1, 0))

    # repack into a single normalized flat vector
    C = C.view(-1)/C.norm()
    E = E.view(-1)/E.norm()
    x = torch.cat([C, E])
    return x
class CTMRG(torch.autograd.Function):

    @staticmethod
    def forward(ctx, T, x, chi, maxiter=50, tol=1E-12):
        diff = 1E10
        for n in range(maxiter):
            x_star = step(T, x, chi)
            diff = (x_star - x).abs().max()
            print(n, diff.item())
            if diff < tol:
                break
            else:
                x = x_star
        print('forward converged to', n, diff.item())
        ctx.save_for_backward(T, x_star)
        # stash the hyperparameters for the backward pass
        ctx.chi, ctx.maxiter, ctx.tol = chi, maxiter, tol
        return x_star
    @staticmethod
    def backward(ctx, grad):
        T, x_star = ctx.saved_tensors
        # detach from the forward graph and re-enable gradients so that
        # autograd.grad below differentiates a single application of step
        T = T.detach().requires_grad_()
        x_star = x_star.detach().requires_grad_()
        dT = grad
        for n in range(ctx.maxiter):
            with torch.enable_grad():
                x = step(T, x_star, ctx.chi)
            grad = torch.autograd.grad(x, x_star, grad_outputs=grad)[0]
            grad_norm = torch.norm(grad)
            if grad_norm > ctx.tol:
                dT = dT + grad
            else:
                break
        print('backward converged to', n, grad_norm.item())
        with torch.enable_grad():
            x = step(T, x_star, ctx.chi)
        dT = torch.autograd.grad(x, T, grad_outputs=dT)[0]
        return dT, None, None, None, None
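# Note on the backward pass: for a fixed point x* = f(T, x*) the implicit
# function theorem gives dx*/dT = (I - df/dx)^{-1} df/dT evaluated at x*,
# and the inverse is expanded as the Neumann series (I - A)^{-1} = sum_n A^n.
# Each pass through the loop above applies one more vector-Jacobian product
# of df/dx to the incoming cotangent, truncating the series once the
# increment's norm drops below tol.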
if __name__ == '__main__':
    torch.manual_seed(42)
    d = 2
    chi = 50
    device = 'cpu'
    dtype = torch.float64

    # T(u,l,d,r): nonzero only when exactly one of the four legs carries a 1
    T = torch.zeros(d, d, d, d, dtype=dtype, device=device)
    T[0, 0, 0, 1] = 1.0
    T[0, 0, 1, 0] = 1.0
    T[0, 1, 0, 0] = 1.0
    T[1, 0, 0, 0] = 1.0

    # random symmetric, normalized initial corner and edge tensors
    C = torch.rand(chi, chi, dtype=dtype, device=device)
    E = torch.rand(chi, d, chi, dtype=dtype, device=device)
    C = 0.5*(C + C.t())
    E = 0.5*(E + E.permute(2, 1, 0))
    C = C/C.norm()
    E = E/E.norm()
    x = torch.cat([C.view(-1), E.view(-1)])

    ctmrg = CTMRG.apply
    x = ctmrg(T, x, chi, 100)

    # cross-check against a generic accelerated fixed-point solver
    def fun(x):
        return step(T, x, chi).numpy() - x

    from scipy import optimize
    sol = optimize.root(fun, x.numpy(), method='anderson', options={'fatol': 1E-10})
    print(sol)
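    # Illustrative sketch (assumes only the definitions above): since CTMRG
    # is a torch.autograd.Function, any scalar built from the fixed point can
    # be differentiated with respect to T via the implicit backward pass.
    T.requires_grad_()
    loss = ctmrg(T, x, chi, 100).norm()  # any scalar function of the fixed point
    loss.backward()                      # triggers CTMRG.backward
    print(T.grad.shape)                  # torch.Size([2, 2, 2, 2])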
import torch
from .adlib import SVD
svd = SVD.apply
from .ctmrg import CTMRG
from .vumps import vumps_run, vumps_calc


def projection(T, epsilon=1E-3):
    D = T.shape[0]  # double-layer bond dimension
    M = T.view(D, -1)
    M = M @ M.t()
    U, S, _ = svd(M)
    #S = S/S.max()
    #chi = (S>epsilon).sum().item()
    # keep as many singular values as needed to reach the target truncation error
    chi = (torch.cumsum(S, dim=0)/S.sum() <= 1-epsilon).sum().item()
    U = U[:, :chi].view(D, chi)
    #print (S/S.max())
    #print (torch.cumsum(S, dim=0)/S.sum())
    print('---->truncated from', D, 'to', chi)
    # project all four legs of T with the isometry U
    return torch.einsum('abcd,ai,bj,ck,dl->ijkl', (T, U, U, U, U)), U
def ctmrg(T, chi, maxiter, epsilon):
    with torch.no_grad():
        C, E = CTMRG(T, chi, maxiter, epsilon)
        Z1 = torch.einsum('ab,bcd,fd,gha,chij,fjk,lg,mil,mk', (C, E, C, E, T, E, C, E, C))
        Z3 = torch.einsum('ab,bc,cd,da', (C, C, C, C))
        Z2 = torch.einsum('ab,bcd,de,fa,gcf,ge', (C, E, C, C, E, C))
        lnZ = torch.log(Z1.abs()) + torch.log(Z3.abs()) - 2.*torch.log(Z2.abs())
    return lnZ
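# The three contractions above form the standard corner-transfer-matrix ratio
# for the partition function per site: Z1 contracts the full network of four
# corners, four edges and one bulk tensor T, Z2 four corners and two edges,
# and Z3 the four corners alone, so that lnZ = ln(Z1*Z3/Z2^2) cancels all
# boundary contributions.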
def vumps(T, chi, maxiter, epsilon):
    with torch.no_grad():
        _, AC, F, C, _ = vumps_run(T, chi, epsilon, maxiter)
        lnZ = torch.log(vumps_calc(T, AC, F, C))
    return lnZ