Commit 26647790 authored by Lei Wang

initial commit

'''
This code solves the counting problem of [1] using differentiable TRG programming [2].
In a nutshell, it computes the maximum eigenvalue of the transfer matrix via the variational principle.
[1] L. Vanderstraeten et al., Phys. Rev. E 98, 042145 (2018)
[2] M. Levin and C. P. Nave, Phys. Rev. Lett. 99, 120601 (2007)
'''
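# Schematically, the optimization at the bottom of this file maximizes the
# variational estimate
#
#     ln(lambda_max) ~ (ln tTr<A|T|A> - ln tTr<A|A>) / 2**Niter
#
# over the entries of the tensor A, where both tensor traces are 2D networks
# of 2**Niter sites contracted with the TRG routine defined next (the bra-ket
# shorthand is an annotation, not notation from the original code).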
import torch
torch.manual_seed(42)
def levin_nave_trg(T, D, Dcut, no_iter, device='cpu'):
    '''
    Coarse grain a 2D network of 2**no_iter copies of the rank-4 tensor T
    (bond dimension D, truncated to at most Dcut) and return ln of its
    tensor trace.
    '''
    lnZ = 0.0
    for n in range(no_iter):
        # rescale T to avoid overflow; 2**(no_iter-n) tensors remain at this step
        maxval = T.max()
        T = T / maxval
        lnZ += 2**(no_iter-n) * torch.log(maxval)
        # split T along its two diagonals (Levin-Nave SVD decomposition)
        Ma = T.permute(2, 1, 0, 3).contiguous().view(D**2, D**2)
        Mb = T.permute(3, 2, 1, 0).contiguous().view(D**2, D**2)
        Ua, Sa, Va = torch.svd(Ma)
        Ub, Sb, Vb = torch.svd(Mb)
        # keep at most Dcut singular values
        D_new = min(D**2, Dcut)
        S1 = (Ua[:, :D_new] * torch.sqrt(Sa[:D_new])).view(D, D, D_new)
        S3 = (Va[:, :D_new] * torch.sqrt(Sa[:D_new])).view(D, D, D_new)
        S2 = (Ub[:, :D_new] * torch.sqrt(Sb[:D_new])).view(D, D, D_new)
        S4 = (Vb[:, :D_new] * torch.sqrt(Sb[:D_new])).view(D, D, D_new)
        # contract the four three-leg pieces into the coarse-grained tensor
        T = torch.einsum('war,abu,bgl,gwd->ruld', S1, S2, S3, S4)
        D = D_new
    # close the network: trace the last remaining tensor over paired legs
    trace = 0.0
    for i in range(D):
        trace += T[i, i, i, i]
    lnZ += torch.log(trace)
    return lnZ
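# Minimal standalone check of levin_nave_trg (an illustrative sketch, not part
# of the dimer calculation below; the 2D Ising construction and the coupling K
# are assumptions, not taken from this commit). Uncomment to run:
#
# import math
# K = 0.5                                      # hypothetical coupling beta*J
# M = torch.tensor([[math.exp(K), math.exp(-K)],
#                   [math.exp(-K), math.exp(K)]], dtype=torch.float64)
# w, v = torch.linalg.eigh(M)                  # M = Q @ Q.t() with Q = v*sqrt(w)
# Q = v * torch.sqrt(w)
# T_ising = torch.einsum('ai,aj,ak,al->ijkl', Q, Q, Q, Q)
# lnZ = levin_nave_trg(T_ising, 2, 24, 20)     # D=2, Dcut=24, 2**20 sites
# print(lnZ.item() / 2**20)                    # ln Z per site, cf. Onsager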
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("-D", type=int, default=2, help="bond dimension of the variational tensor A")
    parser.add_argument("-Dcut", type=int, default=24, help="bond dimension cutoff in TRG")
    parser.add_argument("-Niter", type=int, default=20, help="number of TRG iterations")
    parser.add_argument("-float32", action='store_true', help="use float32")
    parser.add_argument("-cuda", type=int, default=-1, help="GPU device index (negative means CPU)")
    args = parser.parse_args()

    device = torch.device("cpu" if args.cuda < 0 else "cuda:"+str(args.cuda))
    dtype = torch.float32 if args.float32 else torch.float64

    d = 2  # fixed: dimension of each constraint-tensor leg (link empty or occupied)
    D = args.D
    Dcut = args.Dcut
    Niter = args.Niter

    # variational tensor; nn.Parameter already sets requires_grad=True
    A = torch.nn.Parameter(0.01 * torch.randn(d, D**4, dtype=dtype, device=device))

    # dimer covering: constraint tensor with exactly one of its six legs occupied
    T = torch.zeros(d, d, d, d, d, d, dtype=dtype, device=device)
    T[0, 0, 0, 0, 0, 1] = 1.0
    T[0, 0, 0, 0, 1, 0] = 1.0
    T[0, 0, 0, 1, 0, 0] = 1.0
    T[0, 0, 1, 0, 0, 0] = 1.0
    T[0, 1, 0, 0, 0, 0] = 1.0
    T[1, 0, 0, 0, 0, 0] = 1.0
    T = T.view(d, d**4, d)

    optimizer = torch.optim.LBFGS([A], max_iter=20)

    def closure():
        optimizer.zero_grad()
        # single layer <A|T|A>: rank-4 tensor with bond dimension D**2 * d
        T1 = torch.einsum('xa,xby,yc', A, T, A).view(
            D,D,D,D, d,d,d,d, D,D,D,D).permute(
            0,4,8, 1,5,9, 2,6,10, 3,7,11).contiguous().view(
            D**2*d, D**2*d, D**2*d, D**2*d)
        # double layer <A|A>: rank-4 tensor with bond dimension D**2
        T2 = (A.t()@A).view(D, D, D, D, D, D, D, D).permute(
            0,4, 1,5, 2,6, 3,7).contiguous().view(D**2, D**2, D**2, D**2)
        # negative variational estimate of ln(lambda_max) per site
        loss = (-levin_nave_trg(T1, D**2*d, Dcut, Niter)
                + levin_nave_trg(T2, D**2, Dcut, Niter)) / 2**Niter
        print(-loss.item())
        loss.backward()
        return loss

    optimizer.step(closure)
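# Example invocation (the script filename is hypothetical; this commit does not
# show how the file is named):
#
#   python dimer.py -D 2 -Dcut 24 -Niter 20
#
# Each closure evaluation prints -loss, the current variational estimate of
# ln(lambda_max) per site, i.e. the residual entropy density of the dimer
# covering; it should grow toward a converged value as L-BFGS proceeds.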