Advanced Computing Platform for Theoretical Physics

Commit f1e0e64e authored by Lei Wang

cache boundary MPS

parent 1afdb8f6
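In short: the boundary MPS tensors `A1` and `A2` are now allocated once by the caller and handed to `contraction` (and, in the second file, to `vmps` via the new `Ainit` argument), so each new optimization starts from the previously optimized boundary MPS instead of a fresh random tensor. Below is a minimal sketch of that caching idea, outside the actual scripts: because LBFGS updates the `torch.nn.Parameter` in place, a second optimization of the same tensor resumes from the first one's solution. The quadratic objective is a placeholder, not the real free-energy functional, and the shapes are illustrative.

```python
import torch

# Cached boundary tensor: created once, reused across optimizations (illustrative shapes).
Dcut, d = 20, 2
A1 = torch.nn.Parameter(0.01 * torch.randn(Dcut, d, Dcut, dtype=torch.float64))
target = torch.randn_like(A1)

def optimize_boundary(A):
    # Stand-in for the variational boundary MPS optimization inside vmps/contraction.
    opt = torch.optim.LBFGS([A], max_iter=20)
    def closure():
        opt.zero_grad()
        loss = ((A - target) ** 2).sum()   # placeholder objective
        loss.backward()
        return loss
    return opt.step(closure)

loss1 = optimize_boundary(A1)   # cold start from the random initialization
loss2 = optimize_boundary(A1)   # warm start: A1 already holds the optimized values
print(loss1.item(), loss2.item())
```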
@@ -34,12 +34,16 @@ if __name__=='__main__':
     Niter = args.Niter
     B = 0.01* torch.randn(d, D, D, D, D, dtype=dtype, device=device)
-    #symmetrize
+    #symmetrize initial boundary PEPS
     B = (B + B.permute(0, 4, 2, 3, 1))/2.
     B = (B + B.permute(0, 1, 3, 2, 4))/2.
     B = (B + B.permute(0, 3, 4, 1, 2))/2.
     B = (B + B.permute(0, 2, 1, 4, 3))/2.
     A = torch.nn.Parameter(B.view(d, D**4))
+    #boundary MPS
+    A1 = torch.nn.Parameter(0.01*torch.randn(Dcut, D**2*d, Dcut, dtype=dtype, device=device))
+    A2 = torch.nn.Parameter(0.01*torch.randn(Dcut, D**2, Dcut, dtype=dtype, device=device))
     #dimer covering
     T = torch.zeros(d, d, d, d, d, d, dtype=dtype, device=device)
@@ -61,9 +65,8 @@ if __name__=='__main__':
     #double layer
     T2 = (A.t()@A).view(D, D, D, D, D, D, D, D).permute(0,4, 1,5, 2,6, 3,7).contiguous().view(D**2, D**2, D**2, D**2)
     t0=time.time()
-    lnT, _ = contraction(T1, D**2*d, Dcut, Niter)
-    lnZ, _ = contraction(T2, D**2, Dcut, Niter)
+    lnT = contraction(T1, D**2*d, Dcut, Niter, A1)
+    lnZ = contraction(T2, D**2, Dcut, Niter, A2)
     loss = (-lnT + lnZ)
     print (' contraction done {:.3f}s'.format(time.time()-t0))
     print (' total loss', loss.item())
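For orientation, the shapes line up as follows: the double-layer tensor `T2` has bond dimension `D**2` on every leg, so the cached boundary MPS `A2` carries a physical leg of the same size, while `A1`'s physical leg of size `D**2*d` matches the layer that still carries the physical index (the construction of `T1` itself is not shown in this hunk). A quick check of the `T2` case, with illustrative values of `d`, `D`, `Dcut`:

```python
import torch

d, D, Dcut = 2, 2, 20                      # illustrative values
A = torch.randn(d, D**4, dtype=torch.float64)

# double-layer tensor, built as in the diff: every leg has dimension D**2
T2 = (A.t() @ A).view(D, D, D, D, D, D, D, D).permute(0, 4, 1, 5, 2, 6, 3, 7).contiguous().view(D**2, D**2, D**2, D**2)

A2 = torch.nn.Parameter(0.01 * torch.randn(Dcut, D**2, Dcut, dtype=torch.float64))
assert A2.shape[1] == T2.shape[0]          # boundary MPS physical leg matches T2's bond
```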
@@ -28,13 +28,16 @@ def mpsrg(A, T):
         # C = torch.mm(C, C)
     return -lnZ1 + lnZ2

-def vmps(T, d, D, no_iter, Nepochs=50):
+def vmps(T, d, D, Nepochs=50, Ainit=None):
     #symmetrize
     T = (T + T.permute(3, 1, 2, 0))/2. #left-right
     T = (T + T.permute(0, 2, 1, 3))/2. #up-down
-    A = torch.nn.Parameter(0.01*torch.randn(D, d, D, dtype=T.dtype, device=T.device))
+    if Ainit is None:
+        A = torch.nn.Parameter(0.01*torch.randn(D, d, D, dtype=T.dtype, device=T.device))
+    else:
+        A = Ainit
     optimizer = torch.optim.LBFGS([A], max_iter=20)
     def closure():
@@ -54,7 +57,7 @@ def vmps(T, d, D, no_iter, Nepochs=50):
         loss = optimizer.step(closure)
         #print (' epoch, free energy', epoch, loss.item())
-    return -mpsrg(A.detach(), T), None # pass lnZ out, we need to optimize over T
+    return -mpsrg(A.detach(), T) # pass lnZ out, we need to optimize over T

 if __name__=='__main__':
     import time
@@ -63,7 +66,6 @@ if __name__=='__main__':
     parser.add_argument("-D", type=int, default=2, help="D")
     parser.add_argument("-Dcut", type=int, default=20, help="Dcut")
     parser.add_argument("-beta", type=float, default=0.44, help="beta")
-    parser.add_argument("-Niter", type=int, default=32, help="Niter")
     parser.add_argument("-Nepochs", type=int, default=100, help="Nepochs")
     parser.add_argument("-float32", action='store_true', help="use float32")
     parser.add_argument("-cuda", type=int, default=-1, help="use GPU")
@@ -79,7 +81,7 @@ if __name__=='__main__':
     T = torch.einsum('ai,aj,ak,al->ijkl', (M, M, M, M))

     #optimization
-    lnZ, _ = vmps(T, 2, args.Dcut, args.Niter, args.Nepochs)
+    lnZ = vmps(T, 2, args.Dcut, args.Nepochs)
     #recompute lnZ using optimized A
     dlnZ = torch.autograd.grad(lnZ, K, create_graph=True)[0] # En = -d lnZ / d beta
     print (-dlnZ.item())
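As a usage note: because `Ainit` is handed to LBFGS and updated in place, the same `torch.nn.Parameter` can be passed to successive `vmps` calls so the boundary MPS resumes from its previous optimum. A sketch of that pattern follows; the module name `vmps`, the loop over `beta`, and the square-root factorization used to build `M` are assumptions for illustration and are not part of the diff.

```python
import torch
from vmps import vmps    # assumed module name; the diff does not show the filename

dtype, Dcut, Nepochs = torch.float64, 20, 100
Ainit = torch.nn.Parameter(0.01 * torch.randn(Dcut, 2, Dcut, dtype=dtype))

for beta in (0.40, 0.42, 0.44):            # illustrative sweep over couplings
    K = torch.tensor(beta, dtype=dtype, requires_grad=True)
    # one standard factorization of the 2D Ising Boltzmann weight (elided in the diff)
    c, s = torch.sqrt(torch.cosh(K)), torch.sqrt(torch.sinh(K))
    M = torch.stack([torch.stack([c, s]), torch.stack([c, -s])])
    T = torch.einsum('ai,aj,ak,al->ijkl', (M, M, M, M))

    lnZ = vmps(T, 2, Dcut, Nepochs, Ainit=Ainit)   # warm-started boundary MPS
    dlnZ = torch.autograd.grad(lnZ, K, create_graph=True)[0]
    print(beta, -dlnZ.item())              # energy estimate, as in the script's main block
```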