dimer_covering.py

'''
This code solves the counting problem of [1] using differentiable TRG programming [2].
In a nutshell, it computes the maximum eigenvalue of the transfer matrix via the variational principle.
[1] Vanderstraeten et al, PRE 98, 042145 (2018)
[2] Levin and Nave, PRL 99, 120601 (2007)
'''
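
# A sketch of the variational objective, as read from the code below: the dimer
# constraint tensor T defines one layer of the transfer matrix, and A parametrizes
# a variational (PEPS-like) eigenvector. The loss is lnZ - lnT with
#   lnT ~ ln <A|T|A>,   lnZ ~ ln <A|A>,
# so minimizing it maximizes the Rayleigh-quotient estimate lnT - lnZ of the
# log of the maximum eigenvalue, printed below as the residual entropy.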
import torch 
torch.set_num_threads(4)
torch.manual_seed(42)

#from hotrg2 import hotrg as contraction
#from trg import levin_nave_trg as contraction
from ctmrg import ctmrg as contraction
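# Judging from their use below, the three interchangeable contraction backends
# are assumed to share the call signature
#   ln_value, truncation_error = contraction(tensor, chi, Dcut, Niter)
# where `tensor` is a rank-4 torch tensor whose four legs have dimension chi.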

if __name__=='__main__':
    import time
    import argparse
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("-D", type=int, default=2, help="D")
    parser.add_argument("-Dcut", type=int, default=24, help="Dcut")
    parser.add_argument("-Niter", type=int, default=20, help="Niter")

    parser.add_argument("-float32", action='store_true', help="use float32")
    parser.add_argument("-cuda", type=int, default=-1, help="use GPU")
    args = parser.parse_args()
    device = torch.device("cpu" if args.cuda<0 else "cuda:"+str(args.cuda))
    dtype = torch.float32 if args.float32 else torch.float64

    d = 2 # fixed: each leg is either empty (0) or occupied (1) by a dimer

    D = args.D
    Dcut = args.Dcut
    Niter = args.Niter

    # variational tensor: physical dimension d, four virtual legs of dimension D
    # grouped into one index (nn.Parameter already sets requires_grad=True)
    A = torch.nn.Parameter(0.01* torch.randn(d, D**4, dtype=dtype, device=device))
    #dimer covering
    T = torch.zeros(d, d, d, d, d, d, dtype=dtype, device=device)
    T[0, 0, 0, 0, 0, 1] = 1.0 
    T[0, 0, 0, 0, 1, 0] = 1.0 
    T[0, 0, 0, 1, 0, 0] = 1.0 
    T[0, 0, 1, 0, 0, 0] = 1.0 
    T[0, 1, 0, 0, 0, 0] = 1.0 
    T[1, 0, 0, 0, 0, 0] = 1.0 
    T = T.view(d, d**4, d)
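
    # Each nonzero entry above puts a single 1 among the six legs: exactly one
    # dimer touches every site (six legs suggest a cubic-lattice site, with two
    # legs along the transfer direction). The reshape to (d, d**4, d) splits off
    # the first and last legs, which are contracted with the two layers of A below.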

    #optimizer = torch.optim.LBFGS([A], max_iter=10)
    optimizer = torch.optim.Adam([A])
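    # Either optimizer works with the closure below: LBFGS requires a closure
    # because it re-evaluates the loss during its line search, while Adam simply
    # calls it once per step.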
    
    def closure():
        optimizer.zero_grad()

        # sandwich the dimer tensor T between two layers of A, then regroup the
        # (D, d, D) indices of each direction into a single leg of dimension D**2*d
        T1 = torch.einsum('xa,xby,yc->abc', A, T, A).view(D,D,D,D, d,d,d,d, D,D,D,D)
        T1 = T1.permute(0,4,8, 1,5,9, 2,6,10, 3,7,11).contiguous().view(D**2*d, D**2*d, D**2*d, D**2*d)
        # double-layer norm network <A|A>: contract the physical index, pair up legs
        T2 = (A.t()@A).view(D, D, D, D, D, D, D, D).permute(0,4, 1,5, 2,6, 3,7).contiguous().view(D**2, D**2, D**2, D**2)
        t0=time.time()

        lnT, error1 = contraction(T1, D**2*d, Dcut, Niter)
        lnZ, error2 = contraction(T2, D**2, Dcut, Niter)
        loss = (-lnT + lnZ) # minimizing lnZ - lnT maximizes the variational estimate lnT - lnZ
        print ('contraction done {:.3f}s'.format(time.time()-t0))
        print ('residual entropy', -loss.item(), error1.item(), error2.item())

        t0=time.time()
        loss.backward()
        print ('backward done {:.3f}s'.format(time.time()-t0))
        return loss
    
    for epoch in range(100):
        loss = optimizer.step(closure)
        #print ('epoch, loss', epoch, loss)
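
# Example invocation, using the flags defined above (defaults D=2, Dcut=24,
# Niter=20 run on CPU; pass e.g. -cuda 0 to use the first GPU):
#   python dimer_covering.py -D 4 -Dcut 24 -Niter 20
# Each epoch prints the current residual-entropy estimate together with the
# truncation errors of the two contractions.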