Advanced Computing Platform for Theoretical Physics

commit大文件会使得服务器变得不稳定，请大家尽量只commit代码，不要commit大的文件。

Commit 400ab95a by Lei Wang

### correct symmetry

parent 5d8e8921
@@ -4,7 +4,10 @@ from itertools import permutations

 def ctmrg(T, d, Dcut, no_iter):
     #symmetrize
-    T = (T + T.permute(3, 1, 2, 0) + T.permute(0, 2, 1, 3) + T.permute(2, 3, 0, 1) + T.permute(1, 0, 3, 2))/5.
+    T = (T + T.permute(3, 1, 2, 0))/2.
+    T = (T + T.permute(0, 2, 1, 3))/2.
+    T = (T + T.permute(2, 3, 0, 1))/2.
+    T = (T + T.permute(1, 0, 3, 2))/2.

     lnZ = 0.0
     truncation_error = 0.0
... ...
@@ -35,9 +35,11 @@ if __name__=='__main__':
     B = 0.01* torch.randn(d, D, D, D, D, dtype=dtype, device=device)
     #symmetrize
-    A = (B + B.permute(0, 4, 2, 3, 1) + B.permute(0, 1, 3, 2, 4) + B.permute(0, 3, 4, 1, 2) + B.permute(0, 2, 1, 4, 3))/5.
-    A = A.view(d, D**4)
-    A = torch.nn.Parameter(A)
+    B = (B + B.permute(0, 4, 2, 3, 1))/2.
+    B = (B + B.permute(0, 1, 3, 2, 4))/2.
+    B = (B + B.permute(0, 3, 4, 1, 2))/2.
+    B = (B + B.permute(0, 2, 1, 4, 3))/2.
+    A = torch.nn.Parameter(B.view(d, D**4))

     #dimer covering
     T = torch.zeros(d, d, d, d, d, d, dtype=dtype, device=device)
... ...
@@ -63,12 +65,12 @@ if __name__=='__main__':
         lnT, error1 = contraction(T1, D**2*d, Dcut, Niter)
         lnZ, error2 = contraction(T2, D**2, Dcut, Niter)
         loss = (-lnT + lnZ)
-        print ('contraction done {:.3f}s'.format(time.time()-t0))
-        print ('loss, error', loss.item(), error1.item(), error2.item())
+        print ('  contraction done {:.3f}s'.format(time.time()-t0))
+        print ('  loss, error', loss.item(), error1.item(), error2.item())
         t0=time.time()
         loss.backward()
-        print ('backward done {:.3f}s'.format(time.time()-t0))
+        print ('  backward done {:.3f}s'.format(time.time()-t0))
         return loss

     for epoch in range(100):
... ...
@@ -15,7 +15,8 @@ if __name__=='__main__':
     parser.add_argument("-D", type=int, default=2, help="D")
     parser.add_argument("-Dcut", type=int, default=20, help="Dcut")
     parser.add_argument("-beta", type=float, default=0.22, help="beta")
-    parser.add_argument("-Niter", type=int, default=20, help="Niter")
+    parser.add_argument("-Niter", type=int, default=32, help="Niter")
+    parser.add_argument("-Nepochs", type=int, default=100, help="Nepochs")
     parser.add_argument("-float32", action='store_true', help="use float32")
     parser.add_argument("-cuda", type=int, default=-1, help="use GPU")
... ...
@@ -32,9 +33,11 @@ if __name__=='__main__':
     B = 0.01* torch.randn(d, D, D, D, D, dtype=dtype, device=device)
     #symmetrize
-    A = (B + B.permute(0, 4, 2, 3, 1) + B.permute(0, 1, 3, 2, 4) + B.permute(0, 3, 4, 1, 2) + B.permute(0, 2, 1, 4, 3))/5.
-    A = A.view(d, D**4)
-    A = torch.nn.Parameter(A)
+    B = (B + B.permute(0, 4, 2, 3, 1))/2.
+    B = (B + B.permute(0, 1, 3, 2, 4))/2.
+    B = (B + B.permute(0, 3, 4, 1, 2))/2.
+    B = (B + B.permute(0, 2, 1, 4, 3))/2.
+    A = torch.nn.Parameter(B.view(d, D**4))

     #3D Ising
     c = torch.sqrt(torch.cosh(beta))
... ...
@@ -65,7 +68,7 @@ if __name__=='__main__':
         print ('  backward done {:.3f}s'.format(time.time()-t0))
         return loss

-    for epoch in range(100):
+    for epoch in range(args.Nepochs):
         loss = optimizer.step(closure)
         En = beta.grad.item() # En = -d lnZ / d beta
         print ('epoch, free energy, energy', epoch, loss.item(), En)
... ...
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!