# dimer_covering.py
'''
This code solves the dimer covering counting problem [1] using differentiable
TRG programming [2].
In a nutshell, it computes the maximum eigenvalue of the transfer matrix via
the variational principle.
[1] Vanderstraeten et al, PRE 98, 042145 (2018)
[2] Levin and Nave, PRL 99, 120601 (2007)
'''
import torch 
torch.set_num_threads(4)
torch.manual_seed(42)
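
# A minimal sketch of the variational principle mentioned in the docstring
# (illustration only, not used below): for a symmetric matrix M with positive
# entries, the leading eigenvalue bounds the Rayleigh quotient,
#     ln(lambda_max) >= ln(v^T M v) - ln(v^T v)   for any trial vector v,
# with equality at the leading eigenvector. The code below maximizes exactly
# this difference, lnT - lnZ, with the row transfer matrix in place of M and
# a tensor network in place of v, e.g.
#
#   M = torch.rand(8, 8); M = (M + M.t())/2
#   v = torch.rand(8)
#   bound = torch.log(v @ M @ v) - torch.log(v @ v)   # <= ln(lambda_max)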

#from hotrg2 import hotrg as contraction
#from trg import levin_nave_trg as contraction
from ctmrg import ctmrg as contraction
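# all three contraction backends are assumed to share the interface used in
# the closure below: contraction(tensor, chi, Dcut, Niter), returning the log
# of the contracted network and a truncation error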
from itertools import permutations

if __name__=='__main__':
    import time
    import argparse
    parser = argparse.ArgumentParser(description='residual entropy of dimer coverings on the square lattice')
    parser.add_argument("-D", type=int, default=2, help="bond dimension of the variational tensor")
    parser.add_argument("-Dcut", type=int, default=24, help="bond dimension cutoff of the contraction")
    parser.add_argument("-Niter", type=int, default=20, help="number of contraction iterations")

    parser.add_argument("-float32", action='store_true', help="use float32 instead of float64")
    parser.add_argument("-cuda", type=int, default=-1, help="GPU device index (negative: use CPU)")
    args = parser.parse_args()
    device = torch.device("cpu" if args.cuda<0 else "cuda:"+str(args.cuda))
    dtype = torch.float32 if args.float32 else torch.float64

    d = 2 # fixed: each index takes values 0/1, empty or occupied by a dimer

    D = args.D
    Dcut = args.Dcut
    Niter = args.Niter

    # small random initial tensor with one physical and four virtual legs
    B = 0.01* torch.randn(d, D, D, D, D, dtype=dtype, device=device)
    # symmetrize B so that it is invariant under the reflections and rotations
    # of the square-lattice point group
    B = (B + B.permute(0, 4, 2, 3, 1))/2. 
    B = (B + B.permute(0, 1, 3, 2, 4))/2. 
    B = (B + B.permute(0, 3, 4, 1, 2))/2. 
    B = (B + B.permute(0, 2, 1, 4, 3))/2. 
    A = torch.nn.Parameter(B.view(d, D**4))
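    # Optional sanity check (a sketch, not part of the algorithm): after the
    # sequential averaging above, B should be invariant under each of the four
    # leg permutations used, since the corresponding swaps map into each other
    # under conjugation.
    for perm in [(0,4,2,3,1), (0,1,3,2,4), (0,3,4,1,2), (0,2,1,4,3)]:
        assert torch.allclose(B, B.permute(*perm))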

    # dimer covering tensor: T is 1 iff exactly one of its six indices is 1
    T = torch.zeros(d, d, d, d, d, d, dtype=dtype, device=device)
    T[0, 0, 0, 0, 0, 1] = 1.0 
    T[0, 0, 0, 0, 1, 0] = 1.0 
    T[0, 0, 0, 1, 0, 0] = 1.0 
    T[0, 0, 1, 0, 0, 0] = 1.0 
    T[0, 1, 0, 0, 0, 0] = 1.0 
    T[1, 0, 0, 0, 0, 0] = 1.0 
    T = T.view(d, d**4, d)
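    # Optional sanity check: the six assignments above are exactly the basis
    # states with a single index set to 1, so T enforces the hard constraint
    # that each vertex connects to exactly one dimer.
    assert T.sum().item() == 6.0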

    optimizer = torch.optim.LBFGS([A], max_iter=20)
    #optimizer = torch.optim.Adam([A])
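    # torch.optim.LBFGS re-evaluates the objective several times per step, so
    # step() must be passed a closure that zeroes the gradients, recomputes
    # the loss, calls backward(), and returns the loss.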
    
    def closure():
        optimizer.zero_grad()

        # <psi|T|psi>: sandwich the dimer tensor between two copies of the
        # variational tensor, then fuse (D, d, D) triples into single legs
        T1 = torch.einsum('xa,xby,yc->abc', A, T, A).view(D,D,D,D, d,d,d,d, D,D,D,D).permute(0,4,8, 1,5,9, 2,6,10, 3,7,11).contiguous().view(D**2*d, D**2*d, D**2*d, D**2*d)
        # <psi|psi>: the double-layer norm network, with (D, D) pairs fused
        T2 = (A.t()@A).view(D, D, D, D, D, D, D, D).permute(0,4, 1,5, 2,6, 3,7).contiguous().view(D**2, D**2, D**2, D**2)
        t0=time.time()

        lnT, error1 = contraction(T1, D**2*d, Dcut, Niter)
        lnZ, error2 = contraction(T2, D**2, Dcut, Niter)
        loss = (-lnT + lnZ) # minimize -(lnT - lnZ), i.e. maximize the variational estimate of ln(lambda_max)
        print('    contraction done {:.3f}s'.format(time.time()-t0))
        print('    loss, error', loss.item(), error1.item(), error2.item())

        t0=time.time()
        loss.backward()
        print('    backward done {:.3f}s'.format(time.time()-t0))
        return loss
    
    for epoch in range(100):
        loss = optimizer.step(closure)
        print('epoch, residual entropy', epoch, -loss.item())
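
# Example invocation (flags defined above):
#   python dimer_covering.py -D 4 -Dcut 24 -Niter 20
# For close-packed dimers on the square lattice the exact residual entropy
# per site is G/pi ~ 0.29156... (G: Catalan's constant), which the printed
# value should approach as D and Dcut grow.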