Advanced Computing Platform for Theoretical Physics

Commit 1cd0d027 authored by Lei Wang

change API so I always start from the boundary of T; with the Krylov solver there is indeed acceleration given small chi
parent 9fa4665f
 import torch

-def step(T, x, chi):
-    d = T.shape[0]
-    C = torch.as_tensor(x[:chi**2], dtype=T.dtype, device=T.device).view(chi, chi)
-    E = torch.as_tensor(x[chi**2:], dtype=T.dtype, device=T.device).view(chi, d, chi)
-    D_new = min(d*chi, chi)
+def step(T, C, E, chi):
+    dimT, dimE = T.shape[0], E.shape[0]
+    D_new = min(dimE*dimT, chi)

     # step 1: construct the density matrix Rho
     Rho = torch.tensordot(C,E,([1],[0]))        # C(ef)*EU(fga)=Rho(ega)
     Rho = torch.tensordot(Rho,E,([0],[0]))      # Rho(ega)*EL(ehc)=Rho(gahc)
     Rho = torch.tensordot(Rho,T,([0,2],[0,1]))  # Rho(gahc)*T(ghdb)=Rho(acdb)
-    Rho = Rho.permute(0,3,1,2).contiguous().view(d*chi, d*chi)          # Rho(acdb)->Rho(ab;cd)
+    Rho = Rho.permute(0,3,1,2).contiguous().view(dimE*dimT, dimE*dimT)  # Rho(acdb)->Rho(ab;cd)
     Rho = Rho+Rho.t()
     Rho = Rho/Rho.norm()
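A quick sanity check of the contraction chain above is to run it on small random tensors and confirm the final reshape yields a square matrix. A minimal sketch, with d and chi as arbitrary illustrative sizes (not values from this commit):

import torch

d, chi = 2, 4                                    # hypothetical sizes for the shape check
T = torch.rand(d, d, d, d)
C = torch.rand(chi, chi)
E = torch.rand(chi, d, chi)
dimT, dimE = T.shape[0], E.shape[0]

Rho = torch.tensordot(C, E, ([1], [0]))          # C(ef)*EU(fga)=Rho(ega)
Rho = torch.tensordot(Rho, E, ([0], [0]))        # Rho(ega)*EL(ehc)=Rho(gahc)
Rho = torch.tensordot(Rho, T, ([0, 2], [0, 1]))  # Rho(gahc)*T(ghdb)=Rho(acdb)
Rho = Rho.permute(0, 3, 1, 2).contiguous().view(dimE*dimT, dimE*dimT)
assert Rho.shape == (dimE*dimT, dimE*dimT)       # square, ready to be symmetrized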
@@ -36,7 +33,7 @@ def step(T, x, chi):
     C = (P.t() @ Rho @ P)  # C(D_new, D_new)

     ## EL(u,r,d)
-    P = P.view(chi,d,D_new)
+    P = P.view(dimE,dimT,D_new)
     E = torch.tensordot(E, P, ([0],[0]))      # EL(def)P(dga)=E(efga)
     E = torch.tensordot(E, T, ([0,2],[1,0])) # E(efga)T(gehb)=E(fahb)
     E = torch.tensordot(E, P, ([0,2],[0,1])) # E(fahb)P(fhc)=E(abc)
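For context on the elided step 2: the returned S and the use of P above suggest Rho is diagonalized and P collects the eigenvectors of the D_new leading eigenvalues. Because Rho has been symmetrized, that P is an isometry, which is what makes C = P.t() @ Rho @ P a faithful truncation. A small sketch of this property, assuming torch.linalg.eigh and arbitrary sizes:

import torch

Rho = torch.rand(8, 8)
Rho = Rho + Rho.t()                          # symmetric, as after step 1
w, v = torch.linalg.eigh(Rho)                # eigenvalues in ascending order
P = v[:, -4:]                                # keep the 4 algebraically largest
print(torch.dist(P.t() @ P, torch.eye(4)))   # ~0: P is an isometry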
@@ -44,30 +41,33 @@ def step(T, x, chi):
     # step 4: symmetrize C and E
     C = 0.5*(C+C.t())
     E = 0.5*(E + E.permute(2, 1, 0))
-    C = C.view(-1)/C.norm()
-    E = E.view(-1)/E.norm()
-    x = torch.cat([C, E])
-    return x
+    return C/C.norm(), E/E.norm(), S.abs()/S.abs().max()
 class CTMRG(torch.autograd.Function):
     @staticmethod
-    def forward(ctx, T, x, chi, maxiter=50, tol=1E-12):
-        diff = 1E10
+    def forward(ctx, T, chi, maxiter=50, tol=1E-12):
+        C = T.sum((0,1))              # initial corner from the boundary of T
+        E = T.sum(1).permute(0,2,1)   # initial edge from the boundary of T
+        s = torch.zeros(chi, dtype=T.dtype, device=T.device)
         for n in range(maxiter):
-            x_star = step(T, x, chi)
-            diff1 = torch.dist(x_star[:chi**2], x[:chi**2])
-            diff2 = torch.dist(x_star[chi**2:], x[chi**2:])
-            print (n, diff1.item(), diff2.item())
-            #idx = torch.argmax( (x_star[D**2:] - x[D**2:]).abs())
-            #print ('diff', (x_star[D**2:][idx]+ x[D**2:][idx]).item() )
-            if (diff < tol):
-                break
-            else:
-                x = x_star
-        ctx.save_for_backward(T, x_star)
-        return x_star
+            C_new, E_new, s_new = step(T, C, E, chi)
+            if (s.numel() == s_new.numel()):
+                assert(C_new.numel() == chi**2)
+                diff1 = torch.dist(C, C_new)
+                diff2 = torch.dist(E, E_new)
+                diff = torch.dist(s, s_new)
+                print (n, diff1.item(), diff2.item(), diff.item())
+                if (diff1 < tol and diff2 < tol):
+                    break
+            C = C_new
+            E = E_new
+            s = s_new
+        ctx.save_for_backward(T, C, E)
+        return C, E
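Note on the new convergence bookkeeping: forward now tracks the spectrum s returned by step alongside dist(C, C_new) and dist(E, E_new). The spectrum is the safer quantity to watch, because eigenvectors are only defined up to a sign, so the distances on C and E can jump between iterations even at the physical fixed point. A small illustration of that ambiguity (not code from this commit):

import torch

A = torch.rand(4, 4)
A = A + A.t()
w, v = torch.linalg.eigh(A)
v_flip = -v                          # equally valid eigenvectors of A
print(torch.dist(v, v_flip).item())  # large, though both diagonalize A
print(torch.dist(w, w).item())       # 0: the spectrum is unambiguous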
     @staticmethod
     def backward(ctx, grad):
@@ -103,23 +103,18 @@ if __name__=='__main__':
     T[0, 1, 0, 0] = 1.0
     T[1, 0, 0, 0] = 1.0

-    C = torch.rand(chi, chi, dtype=dtype)
-    E = torch.rand(chi, d, chi, dtype=dtype)
-    C = 0.5*(C+C.t())
-    E = 0.5*(E + E.permute(2, 1, 0))
-    C = C/C.norm()
-    E = E/E.norm()
-    x = torch.cat([C.view(-1), E.view(-1)])
     ctmrg = CTMRG.apply
-    x = ctmrg(T, x, chi, 100)
+    C, E = ctmrg(T, chi, 100)
+    x = torch.cat([C.view(-1), E.view(-1)]).numpy()

     def fun(x):
-        return step(T, x, chi).numpy() - x
+        C = torch.as_tensor(x[:chi**2], dtype=T.dtype, device=T.device).view(chi, chi)
+        E = torch.as_tensor(x[chi**2:], dtype=T.dtype, device=T.device).view(chi, d, chi)
+        C, E, _ = step(T, C, E, chi)
+        return torch.cat([C.view(-1), E.view(-1)]).numpy() - x

     from scipy import optimize
-    sol = optimize.root(fun, x.numpy(), method='krylov', options={'fatol':1E-10, 'disp': True})
+    sol = optimize.root(fun, x, method='krylov', options={'fatol':1E-12, 'disp': True})
     print (sol)
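The acceleration the commit message reports for small chi comes from the generic Newton-Krylov pattern used in fun above: recast the fixed-point map x -> step(x) as the root problem step(x) - x = 0 and let scipy's 'krylov' method solve it, which can take far fewer step evaluations than plain power iteration. A self-contained toy version of the same call pattern (the map g is illustrative only):

import numpy as np
from scipy import optimize

def g(x):
    # toy contraction map standing in for one CTMRG step
    return 0.5 * np.cos(x)

sol = optimize.root(lambda x: g(x) - x, np.zeros(3), method='krylov',
                    options={'fatol': 1e-12, 'disp': False})
print(sol.x)                         # every entry at the fixed point of g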