
Commit 55a2240e authored by Lei Wang

initial commit

import numpy as np
import torch

def expm(A, q=10):
    """Matrix exponential of A via a truncated Taylor series with q terms."""
    eA = torch.eye(A.shape[0], dtype=A.dtype, device=A.device)
    trm = torch.eye(A.shape[0], dtype=A.dtype, device=A.device)
    for k in range(1, q):
        trm = trm @ A / k   # k-th Taylor term A^k / k!
        eA = eA + trm
    return eA
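
# Illustrative check (not part of the original commit): on an antisymmetric
# generator of modest norm, the truncated series should match PyTorch's
# built-in torch.matrix_exp (available since PyTorch 1.7) to high accuracy,
# and the result should be close to orthogonal.
def _check_expm(n=8):
    A = torch.randn(n, n, dtype=torch.float64)
    S = (A - A.t()) / 10.   # small antisymmetric generator
    W = expm(S)
    ortho_err = (W @ W.t() - torch.eye(n, dtype=W.dtype)).abs().max()
    ref_err = (W - torch.matrix_exp(S)).abs().max()
    return ortho_err.item(), ref_err.item()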
class Ising(torch.nn.Module):
    def __init__(self, chi, niter, dtype=torch.float64, device='cpu', use_checkpoint=False):
        super(Ising, self).__init__()
        self.D = 2          # bond dimension of the initial Ising tensor
        self.chi = chi      # maximal bond dimension kept by the isometries
        self.niter = niter  # number of coarse-graining steps
        self.dtype = dtype
        # one trainable generator per step; each isometry is built from its
        # antisymmetric part in isometry() below
        B = torch.randn(niter, self.chi**2, self.chi**2, dtype=dtype, device=device)
        B = B / B.norm()
        self.A = torch.nn.Parameter(B)
    def isometry(self, step):
        # bond dimension at this step, capped at chi
        chi = min(self.D**(2**(step//2)), self.chi)
        A = self.A[step, :chi**2, :chi**2]
        # A - A.t() is antisymmetric, so w is orthogonal
        w = expm(A - A.t())
        chi_new = min(self.chi, chi**2)
        # keep the first chi_new orthonormal columns -> an isometry
        return w[:, :chi_new].view(chi, chi, chi_new)
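
    # Illustrative check (not part of the original commit): because w is
    # (approximately) orthogonal, the flattened isometry obeys U^T U = 1 on
    # the kept chi_new-dimensional index:
    #
    #   U = model.isometry(step)
    #   M = U.reshape(-1, U.shape[-1])
    #   assert torch.allclose(M.t() @ M, torch.eye(M.shape[1], dtype=M.dtype))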
    def forward(self, K):
        # initial Boltzmann-weight tensor of the 2D Ising model at coupling K
        K = torch.tensor([K], dtype=self.dtype)
        c = torch.sqrt(torch.cosh(K)/2.)
        s = torch.sqrt(torch.sinh(K)/2.)
        M = torch.stack([torch.cat([c+s, c-s]), torch.cat([c-s, c+s])])
        T = torch.einsum('ai,aj,ak,al->ijkl', (M, M, M, M))

        lnZ = 0.0
        for n in range(self.niter):
            # pull out the norm for numerical stability; each remaining step
            # doubles the number of sites the tensor represents
            f = torch.norm(T)
            lnZ = lnZ + 2**(self.niter-n)*torch.log(f)
            T = T / f
            U = self.isometry(n)  # isometry for this step
            # coarse grain: contract two copies of T with the isometry pair
            T = torch.einsum('axob,amz,moyn,bnw->xwzy', (T, U, T, U))
        # close the network with periodic boundary conditions
        trace = 0.0
        for x in range(T.shape[0]):
            for y in range(T.shape[1]):
                trace += T[x, y, y, x]
        lnZ = lnZ + torch.log(trace)
        return lnZ/2**self.niter
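
# Note (an equivalent formulation, not part of the original commit): the
# explicit double loop above sums T[x, y, y, x]; with a reasonably recent
# PyTorch the same periodic trace can be written with repeated einsum labels:
#
#   trace = torch.einsum('xyyx->', T)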
if __name__ == '__main__':
    model = Ising(20, 20, dtype=torch.float64, device='cpu')
    optimizer = torch.optim.LBFGS(model.parameters(), max_iter=10)

    params = list(model.parameters())
    params = list(filter(lambda p: p.requires_grad, params))
    nparams = sum([np.prod(p.size()) for p in params])
    print('total number of trainable parameters:', nparams)

    def closure():
        optimizer.zero_grad()
        loss = model.forward(0.44)  # lnZ per site at coupling K = 0.44
        loss.backward()
        return loss

    for epoch in range(20):
        loss = optimizer.step(closure)
        print(epoch, loss.item())
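
# For reference (a sketch, not part of the original commit): the optimized
# lnZ per site can be compared against Onsager's exact solution of the
# isotropic square-lattice Ising model,
#
#   def exact_lnZ(K, n=200):
#       theta = np.linspace(0, 2*np.pi, n, endpoint=False)
#       t1, t2 = np.meshgrid(theta, theta)
#       integrand = np.log(np.cosh(2*K)**2 - np.sinh(2*K)*(np.cos(t1) + np.cos(t2)))
#       return np.log(2.) + 0.5*integrand.mean()
#
# K = 0.44 sits close to the critical coupling K_c = ln(1 + sqrt(2))/2
# ~ 0.4407, where the truncation error of the coarse graining is largest.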