import torch


def step(T, C, E, chi):
    """One CTMRG iteration: grow the corner C and edge E with T, then truncate to chi."""
    dimT, dimE = T.shape[0], E.shape[0]
    D_new = min(dimE*dimT, chi)

    # step 1: construct the density matrix Rho
    Rho = torch.tensordot(C, E, ([1], [0]))          # C(ef)*EU(fga)=Rho(ega)
    Rho = torch.tensordot(Rho, E, ([0], [0]))        # Rho(ega)*EL(ehc)=Rho(gahc)
    Rho = torch.tensordot(Rho, T, ([0, 2], [0, 1]))  # Rho(gahc)*T(ghdb)=Rho(acdb)
    Rho = Rho.permute(0, 3, 1, 2).contiguous().view(dimE*dimT, dimE*dimT)  # Rho(acdb)->Rho(ab;cd)

    Rho = Rho + Rho.t()
    Rho = Rho/Rho.norm()

    # step 2: get isometry P
    U, S, V = torch.svd(Rho)
    truncation_error = S[D_new:].sum()/S.sum()
    P = U[:, :D_new]  # projection operator

    # can also use an eigendecomposition since Rho is symmetric:
    # S, U = torch.linalg.eigh(Rho)
    # sorted_S, indices = torch.sort(S.abs(), descending=True)
    # truncation_error = sorted_S[D_new:].sum()/sorted_S.sum()
    # S = S[indices][:D_new]
    # P = U[:, indices][:, :D_new]  # projection operator

    # fix gauge: the first row of P should be positive
    P = P*P[0, :].sign()

    # step 3: renormalize C and E
    C = P.t() @ Rho @ P  # C(D_new, D_new)

    # E(u, r, d)
    P = P.view(dimE, dimT, D_new)
    E = torch.tensordot(E, P, ([0], [0]))        # EL(def)P(dga)=E(efga)
    E = torch.tensordot(E, T, ([0, 2], [1, 0]))  # E(efga)T(gehb)=E(fahb)
    E = torch.tensordot(E, P, ([0, 2], [0, 1]))  # E(fahb)P(fhc)=E(abc)

    # step 4: symmetrize C and E
    C = 0.5*(C + C.t())
    E = 0.5*(E + E.permute(2, 1, 0))

    return C/C.norm(), E/E.norm(), S.abs()/S.abs().max()


class CTMRG(torch.autograd.Function):
    @staticmethod
    def forward(ctx, T, chi, maxiter=50, tol=1E-12):
        C = T.sum((0, 1))              # initial corner C(d, r)
        E = T.sum(1).permute(0, 2, 1)  # initial edge E(u, r, d)
        s = torch.zeros(chi, dtype=T.dtype, device=T.device)
        for n in range(maxiter):
            C_new, E_new, s_new = step(T, C, E, chi)
            if s.numel() == s_new.numel():
                assert C_new.numel() == chi**2
                diff1 = torch.dist(C, C_new)
                diff2 = torch.dist(E, E_new)
                diff = torch.dist(s, s_new)
                print(n, diff1.item(), diff2.item(), diff.item())
                if diff1 < tol and diff2 < tol:
                    break
            C = C_new
            E = E_new
            s = s_new
        ctx.save_for_backward(T, C, E)
        ctx.maxiter, ctx.tol = maxiter, tol
        return C, E

    @staticmethod
    def backward(ctx, grad_C, grad_E):
        # fixed-point backward: since (C*, E*) = step(T, C*, E*), the adjoint of T is
        # the geometric series of vector-Jacobian products
        #   dT = (dstep/dT)^T sum_n [(dstep/d(C,E))^T]^n (grad_C, grad_E)
        T, C_star, E_star = (t.detach().requires_grad_(True) for t in ctx.saved_tensors)
        chi = C_star.shape[0]

        dC, dE = grad_C, grad_E
        grads = (grad_C, grad_E)
        for n in range(ctx.maxiter):
            with torch.enable_grad():
                C_new, E_new, _ = step(T, C_star, E_star, chi)
            grads = torch.autograd.grad((C_new, E_new), (C_star, E_star), grad_outputs=grads)
            grad_norm = grads[0].norm() + grads[1].norm()
            if grad_norm > ctx.tol:
                dC = dC + grads[0]
                dE = dE + grads[1]
            else:
                break
        print('backward converged to', n, grad_norm.item())

        # propagate the accumulated adjoint through one step onto T
        with torch.enable_grad():
            C_new, E_new, _ = step(T, C_star, E_star, chi)
        dT = torch.autograd.grad((C_new, E_new), T, grad_outputs=(dC, dE))[0]
        return dT, None, None, None


if __name__ == '__main__':
    torch.manual_seed(42)
    d = 2
    chi = 40
    device = 'cpu'
    dtype = torch.float64

    # T(u, l, d, r): nonzero only when exactly one leg is 1
    T = torch.zeros(d, d, d, d, dtype=dtype, device=device)
    T[0, 0, 0, 1] = 1.0
    T[0, 0, 1, 0] = 1.0
    T[0, 1, 0, 0] = 1.0
    T[1, 0, 0, 0] = 1.0

    ctmrg = CTMRG.apply
    C, E = ctmrg(T, chi, 1000, 1E-6)

    # cross-check: the converged (C, E) should be a fixed point of `step`
    x = torch.cat([C.view(-1), E.view(-1)]).numpy()

    def fun(x):
        C = torch.as_tensor(x[:chi**2], dtype=T.dtype, device=T.device).view(chi, chi)
        E = torch.as_tensor(x[chi**2:], dtype=T.dtype, device=T.device).view(chi, d, chi)
        C, E, _ = step(T, C, E, chi)
        return torch.cat([C.view(-1), E.view(-1)]).numpy() - x

    from scipy import optimize
    sol = optimize.root(fun, x, method='krylov',
                        options={'fatol': 1E-13, 'disp': True,
                                 'jac_options': {'method': 'lgmres'}})
    print(sol)