Advanced Computing Platform for Theoretical Physics

Commit cc16404c authored by Lei Wang's avatar Lei Wang
Browse files

more grad

parent 8046b81f
...@@ -24,7 +24,7 @@ lnZ = lnZ + torch.log(torch.trace(M))/(2**Niter)
impurity_grad = (M@M).t()/torch.trace(M@T@M)
#(2) autograd on lnZ
lnZ.backward(retain_graph=True)
lnZ_grad = T.grad.clone()
#(3) direct compute dominant eigenvalue
...@@ -39,7 +39,7 @@ exact_grad = ((T.grad + T.grad.t())/2) # need to symmetrize since it is an upper
#or, HF theorem
eigenvector_grad= torch.ger(v[:,-1], v[:, -1])/w[-1] # outer product of the leading eigenvector and its transpose
#(5) lnZ using eigenvector environment
w, v = torch.symeig(T, eigenvectors=True)
T.grad.zero_()
...@@ -49,10 +49,22 @@ e = v[:, -1] #environment
#e = v[:, -1].detach() #environment
loss = torch.log(e@T@e)
loss.backward(retain_graph=True)
v_environment_grad = T.grad.clone()
#(6) lnZ using matrix environment
T.grad.zero_()
#try this
#e = M #environment
#and this
e = M.detach() #environment; this will be the same as (1)
loss = torch.log(torch.trace(e@T@e))
loss.backward()
M_environment_grad = T.grad.clone()
print ((impurity_grad-exact_grad).abs().max().item())
print ((lnZ_grad-exact_grad).abs().max().item())
print ((eigenvector_grad-exact_grad).abs().max().item())
print ((v_environment_grad-exact_grad).abs().max().item())
print ((M_environment_grad-exact_grad).abs().max().item())
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment