Commit fface86d authored by Lei Wang

add newton in dimer

parent b4485128
@@ -27,7 +27,8 @@ if __name__=='__main__':
     parser = argparse.ArgumentParser(description='')
     parser.add_argument("-D", type=int, default=2, help="D")
     parser.add_argument("-Dcut", type=int, default=30, help="Dcut")
-    parser.add_argument("-Niter", type=int, default=20, help="Niter")
+    parser.add_argument("-Niter", type=int, default=50, help="Niter")
+    parser.add_argument("-Nepochs", type=int, default=10, help="Nepochs")
     parser.add_argument("-float32", action='store_true', help="use float32")
     parser.add_argument("-lanczos_steps", type=int, default=0, help="lanczos steps")
@@ -62,8 +63,7 @@ if __name__=='__main__':
     T[1, 0, 0, 0, 0, 0] = 1.0
     T = T.view(d, d**4, d)

-    optimizer = torch.optim.LBFGS([A], max_iter=20, tolerance_grad=0)
-    #optimizer = torch.optim.Adam([A])
+    optimizer = torch.optim.LBFGS([A], max_iter=20, tolerance_grad=0, tolerance_change=0, line_search_fn="strong_wolfe")

     def closure():
         optimizer.zero_grad()
@@ -87,6 +87,64 @@ if __name__=='__main__':
         #print (' backward done {:.3f}s'.format(time.time()-t0))
         return loss

-    for epoch in range(100):
+    for epoch in range(args.Nepochs):
         loss = optimizer.step(closure)
         print ('epoch, residual entropy', epoch, -loss.item())
+
+    # continue with Newton-CG
+    ####################################################################
+    def fun(x, info):
+        A = torch.as_tensor(x).requires_grad_()
+        As = symmetrize(A)
+        T1 = torch.einsum('xa,xby,yc' , (As,T,As)).view(D,D,D,D, d,d,d,d, D,D,D,D).permute(0,4,8, 1,5,9, 2,6,10, 3,7,11).contiguous().view(D**2*d, D**2*d, D**2*d, D**2*d)
+        T2 = (As.t()@As).view(D, D, D, D, D, D, D, D).permute(0,4, 1,5, 2,6, 3,7).contiguous().view(D**2, D**2, D**2, D**2)
+
+        t0=time.time()
+        lnT, error1 = contraction(T1, D**2*d, Dcut, Niter)
+        lnZ, error2 = contraction(T2, D**2, Dcut, Niter)
+        loss = (-lnT + lnZ)
+        # create_graph=True keeps the gradient differentiable so hessp can reuse it
+        loss.backward(create_graph=True)
+
+        info['feval'] += 1
+        info['loss'] = loss
+        info['A'] = A
+        print (info['feval'], loss.item(), A.grad.norm().item())
+        return loss.item(), A.grad.detach().numpy().ravel()
+
+    # see this https://code.itp.ac.cn/wanglei/dpeps/commit/2e47d663bb2c8e155967c4b644edf63d9e9b22e9
+    def hessp(x, p, info):
+        A = info['A']
+        if ((x == A.detach().numpy().ravel()).all()):
+            # Hessian-vector product is requested at the point of the last fun
+            # evaluation: reuse the differentiable gradient stored in A.grad
+            grad = A.grad.view(-1)
+        else:
+            # otherwise recompute the loss and gradient at x
+            A = torch.as_tensor(x).requires_grad_()
+            As = symmetrize(A)
+            T1 = torch.einsum('xa,xby,yc' , (As,T,As)).view(D,D,D,D, d,d,d,d, D,D,D,D).permute(0,4,8, 1,5,9, 2,6,10, 3,7,11).contiguous().view(D**2*d, D**2*d, D**2*d, D**2*d)
+            T2 = (As.t()@As).view(D, D, D, D, D, D, D, D).permute(0,4, 1,5, 2,6, 3,7).contiguous().view(D**2, D**2, D**2, D**2)
+
+            t0=time.time()
+            lnT, error1 = contraction(T1, D**2*d, Dcut, Niter)
+            lnZ, error2 = contraction(T2, D**2, Dcut, Niter)
+            loss = (-lnT + lnZ)
+            loss.backward(create_graph=True)
+            grad = A.grad.view(-1)
+
+        # dot the gradient with the given vector and differentiate once more
+        # to obtain the Hessian-vector product
+        loss = torch.dot(grad, torch.as_tensor(p))
+        hvp = torch.autograd.grad(loss, A, retain_graph=True)[0].view(-1)
+        return hvp.detach().numpy().ravel()
+
+    import scipy.optimize
+    x0 = A.detach().numpy().ravel()
+    x = scipy.optimize.minimize(fun, x0, args=({'feval':0},), jac=True, hessp=hessp, method='Newton-CG', options={'xtol':1E-8})
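
For reference, here is a minimal, self-contained sketch (not part of the commit) of the Hessian-vector-product pattern used above: backward with create_graph=True keeps the gradient itself differentiable, so differentiating grad·p one more time yields H·p without ever forming the full Hessian, which is what scipy's Newton-CG consumes through hessp. The quartic objective and all variable names below are made up for illustration.

import numpy as np
import scipy.optimize
import torch

def fun(x, info):
    # toy stand-in for the tensor-network loss
    x = torch.as_tensor(x).requires_grad_()
    f = (x**4).sum() + x[0]*x[1]
    f.backward(create_graph=True)        # keep the graph for double backward
    info['x'], info['grad'] = x, x.grad  # cache for hessp at the same point
    return f.item(), x.grad.detach().numpy().ravel()

def hessp(x, p, info):
    xt, grad = info['x'], info['grad']
    if not np.array_equal(x, xt.detach().numpy()):
        # point changed since the last fun call: refresh the cached loss and gradient
        fun(x, info)
        xt, grad = info['x'], info['grad']
    # differentiate grad·p once more to get the Hessian-vector product
    gp = torch.dot(grad, torch.as_tensor(p))
    hvp = torch.autograd.grad(gp, xt, retain_graph=True)[0]
    return hvp.detach().numpy().ravel()

info = {}
res = scipy.optimize.minimize(fun, np.ones(4), args=(info,), jac=True,
                              hessp=hessp, method='Newton-CG', options={'xtol': 1e-8})
print(res.x, res.fun)

Reusing the differentiable gradient from the last fun call avoids repeating the forward contraction for every conjugate-gradient step, which is the point of caching A and its grad in info in the commit above.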