Advanced Computing Platform for Theoretical Physics

Commit 598e2af4 authored by Chen-Xiao Dong

simple notes and code


Signed-off-by: chenxiaodong <cxdong@iphy.ac.cn>
*.aux
*.blg
*.log
*.out
*.synctex.gz
.DS_Store
*~
note.pdf
*.pyc
import scipy.sparse as sps
from numpy import zeros
import numpy as np
class Lattice:
def __init__(self,L, d, BC='periodic'):
self.L = L
self.d = d
self.shape = [L]*d
self.Nsite = L**d
self.BC = BC
def move(self, idx, d, shift):
coord = self.index2coord(idx)
coord[d] += shift
if self.BC != 'periodic':
if (coord[d]>=self.L) or (coord[d]<0):
return None
#wrap around because of the PBC
if (coord[d]>=self.L): coord[d] -= self.L;
if (coord[d]<0): coord[d] += self.L;
return self.coord2index(coord)
def index2coord(self, idx):
coord = zeros(self.d, int)
for d in range(self.d):
coord[self.d-d-1] = idx % self.L
idx //= self.L
return coord
def coord2index(self, coord):
idx = coord[0]
for d in range(1, self.d):
idx *= self.L;
idx += coord[d]
return idx
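# Sketch (not in the original file): index2coord and coord2index should be exact
# inverses under the row-major convention used above; a quick self-check over all
# sites (the helper name _check_index_roundtrip is illustrative):
def _check_index_roundtrip(lat):
    for i in range(lat.Nsite):
        assert lat.coord2index(lat.index2coord(i)) == i, i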
class Hypercube(Lattice):
def __init__(self,L, d, BC='periodic'):
super(Hypercube, self).__init__(L, d, BC)
self.Adj = zeros((self.Nsite,self.Nsite), int)
for i in range(self.Nsite):
for d in range(self.d):
j = self.move(i, d, 1)
if j is not None:
self.Adj[i, j] = 1.0
self.Adj[j, i] = 1.0
self.deAdj = zeros((self.Nsite,self.Nsite), int)
for i in range(self.Nsite):
for d in range(self.d):
j = self.move(i, d, 1)
if j is not None:
self.deAdj[i, i] = 1.0
self.deAdj[i, j] = -1.0
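# deAdj is a discrete difference operator: for the one-dimensional periodic chain
# used by xyS1ring in s1object.py, row i has +1 at site i and -1 at site i+1 (mod L),
# so deAdj @ x yields the nearest-neighbour differences x_i - x_{i+1}.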
class Triangular(Lattice):
def __init__(self, L):
super(Triangular, self).__init__(L, 2)
self.Adj = zeros((self.Nsite,self.Nsite), int)
for i in range(self.Nsite):
for d in range(self.d):
j = self.move(i, d, 1)
if j is not None:
self.Adj[i, j] = 1.0
self.Adj[j, i] = 1.0
#diagonal
j = self.move(i, 0, 1)
k = self.move(j, 1, 1)
if (j is not None) and (k is not None):
self.Adj[i, k] = 1.0
self.Adj[k, i] = 1.0
if __name__=='__main__':
from scipy.linalg import eigh
L=4
d=2
lattice = Hypercube(L, d)
#lattice = Triangular(L)
print (lattice.Adj)
T = 2.269185314213022
K = lattice.Adj/T
def energy(s):
return 0.5 * np.dot(np.dot(np.transpose(s), K) , s)
#enumerate all states
w = []
for i in range(1<<lattice.Nsite):
config = "{0:b}".format(i).zfill(lattice.Nsite)
spins = np.array([2*int(s)-1 for s in config])
print (i, spins)
w.append(-energy(spins))
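# w now holds -energy(s) for each of the 2**Nsite configurations, so logsumexp(w)
# below is the exact log partition function log Z obtained by brute-force enumeration,
# and the printed value is -log Z. T above is the critical temperature of the
# square-lattice Ising model, 2/log(1+sqrt(2)) ~ 2.2692.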
from scipy.special import logsumexp
print (-logsumexp(w))
import sys
sys.exit(1)
w, v = eigh(lattice.Adj)
v.shape = (L, L, L**2)
print (w)
import matplotlib.pyplot as plt
plt.figure()
#plt.subplots(211)
#plt.imshow(v[:, :, -2])
#plt.subplots(212)
plt.imshow(v[:, :, 2], vmin=-1, vmax=1)
plt.show()
import math
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from modelpro import LieFlow
from s1object import xyS1ring#Trivial2D
if __name__=='__main__':
n=30
target=xyS1ring(n,beta=1)
#target=Trivial2D(n)
model=LieFlow(depth=3,dim=n,udim=5)
optimizer=torch.optim.Adam(model.parameters(),lr=1e-2)
params= list(model.parameters())
params= list(filter(lambda p:p.requires_grad,params))
nparams= sum([np.prod(p.size())for p in params])
print('params number:',nparams)
np_losses=[]
for e in range(0,350):
x, logp= model.sample(1000)
loss= logp.mean() -target(x).mean()
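# The batch estimate of E_q[log q(x)] - E_q[log target(x)] is the reverse KL
# divergence KL(q||p) up to the unknown log-normalization of the target, so this
# loss is bounded below by -log Z and decreases toward it as the flow q approaches
# the target.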
model.zero_grad()
loss.backward()
optimizer.step()
with torch.no_grad():
print(e,loss.item())
np_losses.append([loss.item()])
#sample=x.detach().numpy()
#plt.cla()
#plt.plot(sample[:,0],sample[:,1],'o')
#plt.draw()
#plt.pause(0.02)
#if (e%10==0):
# plt.savefig('E:/flownormalization/data/ns1/projection_{:04d}.png'.format(e))
#for i in model.named_parameters():
# print(i)
plt.cla()
np_losses=np.array(np_losses)
plt.plot(np_losses)
plt.xlabel('$epoch$')
plt.ylabel('$loss$')
mx=np.arange(-1, 351, 1)
my=0*mx
plt.plot(mx,my)
plt.savefig('E:/flownormalization/data/ns1/loss.png')
plt.show()
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.autograd import Function
from torch.autograd import gradcheck
import numpy as np
def normalf(x):
y=x
while (y > 1) or (y < -1):
if y>1:
y=y-2
else:
y=y+2
return y
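# normalf wraps a scalar onto the fundamental domain [-1, 1] by shifting it in
# steps of 2, i.e. each variable lives on a circle parametrized by x in [-1, 1).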
class Normalayer(Function):
@staticmethod
def forward(ctx,x):
y=x
z=y.detach().numpy()
z=np.array(list(map(lambda x:list(map(normalf,x)),z)))
return torch.Tensor.float(torch.from_numpy(z))
@staticmethod
def backward(ctx,grad_output):
return grad_output
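# The backward pass is a straight-through identity: wrapping by multiples of 2 is a
# piecewise translation with derivative 1 almost everywhere, so the incoming gradient
# is passed on unchanged and the wrap contributes nothing to the log-Jacobian.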
class Perlayer(nn.Module):
def __init__(self):
super(Perlayer,self).__init__()
self.logjac=0
def forward(self,x):
y=Normalayer.apply(x)
return y
def inverse(self,y):
x=Normalayer.apply(y)
return x
class SLinearlayer(nn.Module):
def __init__(self,dim):
super(SLinearlayer,self).__init__()
self.disp=nn.Parameter(torch.FloatTensor(dim),requires_grad=True)
self.disp.data.uniform_(0, 0.3)
self.logjac=0
def forward(self,x):
y=self.disp+x
y=Normalayer.apply(y)
return y
def inverse(self,y):
x=y-self.disp
x=Normalayer.apply(x)
return x
class SNonLinearlayer(nn.Module):
def __init__(self):
super(SNonLinearlayer,self).__init__()
def forward(self,x):
self.logjac=x.new_zeros(x.shape[0])
y= torch.where(x >= 0, (torch.exp(x)-1)/(math.exp(1)-1), -(torch.exp(-x)-1)/(math.exp(1)-1))
self.logjac=torch.where(x >= 0,x-math.log(math.exp(1)-1),-x-math.log(math.exp(1)-1)).sum(1)
return y
def inverse(self,y):
self.logjac=y.new_zeros(y.shape[0])
x=torch.where(y >= 0,torch.log(y*(math.exp(1)-1)+1),-torch.log(-y*(math.exp(1)-1)+1))
self.logjac=torch.where(y >= 0,-torch.log(y+1/(math.exp(1)-1)),-torch.log(-y+1/(math.exp(1)-1))).sum(1)
return x
class SNonLinearlayerplus(nn.Module):
def __init__(self,dim):
super(SNonLinearlayerplus,self).__init__()
self.k=nn.Parameter(torch.FloatTensor(dim),requires_grad=True)
self.k.data.uniform_(0, 0.5)
self.logjac=0
def forward(self,x):
self.logjac=x.new_zeros(x.shape[0])
m=torch.exp(self.k)
y= torch.where(x >= 0, (torch.exp(m*x)-1)/(torch.exp(m)-1), -(torch.exp(-m*x)-1)/(torch.exp(m)-1))
self.logjac=torch.where(x >= 0,torch.log(m)+m*x-torch.log(torch.exp(m)-1),torch.log(m)-m*x-torch.log(torch.exp(m)-1)).sum(1)
return y
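# Worked form of this layer: with m = exp(k) > 0 and x >= 0, y = (exp(m*x)-1)/(exp(m)-1)
# maps [0,1] onto itself with y(1) = 1, and the x < 0 branch is its odd extension.
# dy/dx = m*exp(m*x)/(exp(m)-1), so log|dy/dx| = log(m) + m*x - log(exp(m)-1), which is
# the expression accumulated into self.logjac above.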
def inverse(self,y):
self.logjac=y.new_zeros(y.shape[0])
m=torch.exp(self.k)
x=torch.where(y >= 0,torch.log(y*(torch.exp(m)-1)+1)/m,-torch.log(-y*(torch.exp(m)-1)+1)/m)
self.logjac=torch.where(y >= 0,-torch.log(m)-torch.log(y+1/(torch.exp(m)-1)),-torch.log(m)-torch.log(-y+1/(torch.exp(m)-1))).sum(1)
return x
class parallelop(Function):
@staticmethod
def forward(ctx,x,m,lx,ly):
y=x
locx=int((lx.detach().numpy())[0])
locy=int((ly.detach().numpy())[0])
a=np.eye(x.size()[1])
a[locx][locy]=m.detach().numpy()[0]
b=torch.tensor(a).float()
ctx.save_for_backward(x,b,lx,ly)
y=y.mm(b.t())
return y
@staticmethod
def backward(ctx,grad_output):
x,b,lx,ly=ctx.saved_tensors
locx=int((lx.detach().numpy())[0])
locy=int((ly.detach().numpy())[0])
grad_input=grad_m=grad_x=grad_y=None
if ctx.needs_input_grad[0]:
grad_input=grad_output.mm(b)
if ctx.needs_input_grad[1]:
grad_b=grad_output.t().mm(x)
grad_m=torch.ones(1)*grad_b[locx][locy]
if ctx.needs_input_grad[2]:
grad_x=torch.ones(1)
if ctx.needs_input_grad[3]:
grad_y=torch.ones(1)
return grad_input,grad_m,grad_x,grad_y
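# parallelop multiplies the batch by the elementary shear I + m*E_{locx,locy}, i.e. the
# identity with a single off-diagonal entry m. Such a shear has unit determinant, which
# is why Mixlayer below keeps self.logjac at zero.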
class Mixlayer(nn.Module):
def __init__(self,xloc,yloc):
super(Mixlayer,self).__init__()
self.m=nn.Parameter(torch.FloatTensor(1),requires_grad=True)
self.m.data.uniform_(0, 0.3)
self.logjac=0
self.xloc=torch.tensor([xloc]);
self.yloc=torch.tensor([yloc]);
def forward(self,x):
self.logjac=x.new_zeros(x.shape[0])
y=parallelop.apply(x,self.m,self.xloc,self.yloc)
y=Normalayer.apply(y)
return y
def inverse(self,y):
self.logjac=y.new_zeros(y.shape[0])
x=parallelop.apply(y,-self.m,self.xloc,self.yloc)
x=Normalayer.apply(x)
return x
class LieFlow(nn.Module):
def __init__(self, depth, dim,udim, device='cpu', name=None):
super(LieFlow , self).__init__()
self.device = device
self.dim=dim
self.layers=nn.ModuleList()
if name is None:
self.name = 'LieFlow'
else:
self.name = name
self.layers.append(Perlayer())
for d in range(depth):
for e1 in range(dim):
for e2 in range(e1+1,dim):
self.layers.append(Mixlayer(e1,e2))
self.layers.append(Mixlayer(e2,e1))
for e in range(udim):
self.layers.append(SLinearlayer(dim))
self.layers.append(SNonLinearlayerplus(dim))
self.layers.append(Perlayer())
def forward(self, x):
'''
from latent to physical
'''
self.logjac=x.new_zeros(x.shape[0])
m=0
for d in self.layers:
x = d(x)
self.logjac +=d.logjac
return x
def inverse(self, y):
'''
from physical to latent
'''
self.logjac=y.new_zeros(y.shape[0])
for d in reversed(self.layers):
y = d.inverse(y)
self.logjac +=d.logjac
return y
def calgd(self, z):
logp=z.new_zeros(z.shape[0])
l=z.new_zeros(z.size())
for m in range(-1,2):
l+= torch.exp(-2*(z.add(m*2)).pow(2))
logp+= torch.log(l).add(-0.5*math.log(0.5*math.pi)).sum(1)
return logp
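# calgd is the log-density of the base distribution: a Gaussian N(0, 0.5^2) wrapped onto
# [-1, 1) by summing its three nearest periodic images (m = -1, 0, 1). exp(-2*z^2) is the
# unnormalized N(0, 0.5^2) density and -0.5*log(0.5*pi) its log-normalization, matching
# sample() below, whose draw z ~ N(0, 0.5) is wrapped by the first Perlayer of the flow.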
def sample(self, batch_size):
z= torch.Tensor(batch_size, self.dim).normal_(0,0.5)
x= self.forward(z)
logp= self.calgd(z)-self.logjac
return x,logp
def logprob(self, x):
z=self.inverse(x)
return self.calgd(z)+self.logjac
if __name__=='__main__':
model=LieFlow(depth=5,dim=3,udim=2,name='test')
print(model)
x =torch.randn(2,3,requires_grad=True)
print(x)
m=model.forward(x)
print(m)
m=m.mean()
m.backward()
print(x.grad)
print(list(model.parameters()))
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.autograd import Function
from torch.autograd import gradcheck
import numpy as np
def normalf(x):
y=x
while (y > 1) or (y < -1):
if y>1:
y=y-2
else:
y=y+2
return y
class Normalayer(Function):
@staticmethod
def forward(ctx,x):
y=x
z=y.detach().numpy()
z=np.array(list(map(lambda x:list(map(normalf,x)),z)))
return torch.Tensor.float(torch.from_numpy(z))
@staticmethod
def backward(ctx,grad_output):
return grad_output
class Perlayer(nn.Module):
def __init__(self):
super(Perlayer,self).__init__()
self.logjac=0
def forward(self,x):
y=Normalayer.apply(x)
return y
def inverse(self,y):
x=Normalayer.apply(y)
return x
class SLinearlayer(nn.Module):
def __init__(self,dim):
super(SLinearlayer,self).__init__()
self.disp=nn.Parameter(torch.FloatTensor(dim),requires_grad=True)
self.disp.data.uniform_(0, 0.3)
self.logjac=0
def forward(self,x):
y=self.disp+x
y=Normalayer.apply(y)
return y
def inverse(self,y):
x=y-self.disp
x=Normalayer.apply(x)
return x
class SNonLinearlayer(nn.Module):
def __init__(self):
super(SNonLinearlayer,self).__init__()
def forward(self,x):
self.logjac=x.new_zeros(x.shape[0])
y= torch.where(x >= 0, (torch.exp(x)-1)/(math.exp(1)-1), -(torch.exp(-x)-1)/(math.exp(1)-1))
self.logjac=torch.where(x >= 0,x-math.log(math.exp(1)-1),-x-math.log(math.exp(1)-1)).sum(1)
return y
def inverse(self,y):
self.logjac=y.new_zeros(y.shape[0])
x=torch.where(y >= 0,torch.log(y*(math.exp(1)-1)+1),-torch.log(-y*(math.exp(1)-1)+1))
self.logjac=torch.where(y >= 0,-torch.log(y+1/(math.exp(1)-1)),-torch.log(-y+1/(math.exp(1)-1))).sum(1)
return x
class SNonLinearlayerplus(nn.Module):
def __init__(self,dim):
super(SNonLinearlayerplus,self).__init__()
self.k=nn.Parameter(torch.FloatTensor(dim),requires_grad=True)
self.k.data.uniform_(0, 0.5)
self.logjac=0
def forward(self,x):
self.logjac=x.new_zeros(x.shape[0])
m=torch.exp(self.k)
y= torch.where(x >= 0, (torch.exp(m*x)-1)/(torch.exp(m)-1), -(torch.exp(-m*x)-1)/(torch.exp(m)-1))
self.logjac=torch.where(x >= 0,torch.log(m)+m*x-torch.log(torch.exp(m)-1),torch.log(m)-m*x-torch.log(torch.exp(m)-1)).sum(1)
return y
def inverse(self,y):
self.logjac=y.new_zeros(y.shape[0])
m=torch.exp(self.k)
x=torch.where(y >= 0,torch.log(y*(torch.exp(m)-1)+1)/m,-torch.log(-y*(torch.exp(m)-1)+1)/m)
self.logjac=torch.where(y >= 0,-torch.log(m)-torch.log(y+1/(torch.exp(m)-1)),-torch.log(m)-torch.log(-y+1/(torch.exp(m)-1))).sum(1)
return x
class NewSNonLinearlayerplus(nn.Module):
def __init__(self,dim):
super(NewSNonLinearlayerplus,self).__init__()
self.k=nn.Parameter(torch.FloatTensor(dim),requires_grad=True)
self.k.data.uniform_(0, 0.5)
self.logjac=0
def forward(self,x):
self.logjac=x.new_zeros(x.shape[0])
m=torch.exp(self.k)
y= torch.where(x >= 0, x, -(torch.exp(-m*x)-1)/(torch.exp(m)-1))
self.logjac=torch.where(x >= 0,torch.zeros_like(x),torch.log(m)-m*x-torch.log(torch.exp(m)-1)).sum(1)
return y
def inverse(self,y):
self.logjac=y.new_zeros(y.shape[0])
m=torch.exp(self.k)
x=torch.where(y >= 0,y,-torch.log(-y*(torch.exp(m)-1)+1)/m)
self.logjac=torch.where(y >= 0,torch.zeros_like(y),-torch.log(m)-torch.log(-y+1/(torch.exp(m)-1))).sum(1)
return x
class parallelop(Function):
@staticmethod
def forward(ctx,x,m):
y=x
m=m.detach().numpy()
n=np.size(x.detach().numpy(),1)
m1=np.zeros((n,n))
m2=np.zeros((n,n))
for a in range(0,n):
for b in range(0,a):
m1[a][b]=m[a][b]
m1[a][a]=1
for a in range(0,n):
for b in range(a,n-1):
m2[a][b+1]=m[a][b]
m2[a][a]=1
m1=torch.from_numpy(m1).float()
m2=torch.from_numpy(m2).float()
y1=y.mm(m1.t())
y1=Normalayer.apply(y1)
y2=y1.mm(m2.t())
ctx.save_for_backward(x,y1,m1,m2)
return y2
@staticmethod
def backward(ctx,grad_output):
input1,input2, m1, m2 = ctx.saved_tensors
grad_m=None
grad_input1=grad_output.mm(m2)
grad_input2=grad_input1.mm(m1)
if ctx.needs_input_grad[1]:
grad_m2=grad_output.t().mm(input2)
grad_m1=grad_input1.t().mm(input1)
n=np.size(input1.detach().numpy(),1)
m=np.zeros((n,n-1))
for a in range(0,n):
for b in range(0,a):
m[a][b]=grad_m1[a][b]
for a in range(0,n):
for b in range(a,n-1):
m[a][b]=grad_m2[a][b+1]
grad_m=torch.from_numpy(m).float()
return grad_input2,grad_m
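# This variant packs a (dim x dim-1) parameter matrix into two shears: m1 is lower
# triangular with unit diagonal and m2 is upper triangular with unit diagonal, applied
# in sequence with a wrap in between. Both have determinant 1, so the map again
# contributes no log-Jacobian; backward reassembles the gradient of the packed
# parameter matrix from grad_m1 (below-diagonal part) and grad_m2 (above-diagonal part).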
class Mixlayer(nn.Module):
def __init__(self,dim):
super(Mixlayer,self).__init__()
self.m=nn.Parameter(torch.FloatTensor(dim,dim-1),requires_grad=True)
self.m.data.uniform_(0, 0.1)
self.logjac=0
def forward(self,x):
self.logjac=x.new_zeros(x.shape[0])
y=parallelop.apply(x,self.m)
y=Normalayer.apply(y)
return y
def inverse(self,y):
self.logjac=y.new_zeros(y.shape[0])
x=parallelop.apply(y,-self.m)
x=Normalayer.apply(x)
return x
class LieFlow(nn.Module):
def __init__(self, depth, dim,udim, device='cpu', name=None):
super(LieFlow , self).__init__()
self.device = device
self.dim=dim
self.layers=nn.ModuleList()
if name is None:
self.name = 'LieFlow'
else:
self.name = name
self.layers.append(Perlayer())
for d in range(depth):
self.layers.append(Mixlayer(dim))
for e in range(udim):
self.layers.append(SLinearlayer(dim))
self.layers.append(SNonLinearlayerplus(dim))
self.layers.append(Perlayer())
def forward(self, x):
'''
from latent to physical
'''
self.logjac=x.new_zeros(x.shape[0])
m=0
for d in self.layers:
x = d(x)
self.logjac +=d.logjac
return x
def inverse(self, y):
'''
from physical to latent
'''
self.logjac=y.new_zeros(y.shape[0])
for d in reversed(self.layers):
y = d.inverse(y)
self.logjac +=d.logjac
return y
def calgd(self, z):
logp=z.new_zeros(z.shape[0])
l=z.new_zeros(z.size())
for m in range(-1,2):
l+= torch.exp(-2*(z.add(m*2)).pow(2))
logp+= torch.log(l).add(-0.5*math.log(0.5*math.pi)).sum(1)
return logp
def sample(self, batch_size):
z= torch.Tensor(batch_size, self.dim).normal_(0,0.5)
x= self.forward(z)
logp= self.calgd(z)-self.logjac
return x,logp
def logprob(self, x):
z=self.inverse(x)
return self.calgd(z)+self.logjac
if __name__=='__main__':
'''
model=LieFlow(depth=2,dim=3,udim=2,name='test')
print(model)
y =torch.zeros(10,3,requires_grad=True)
x=model.forward(y)
print(y)
print(x)
'''
x =torch.zeros(3,2,requires_grad=True)
print(x)
y =torch.randn(1,3,requires_grad=True)
print(y)
l=parallelop.apply(y,x)
m=l.mean()
m.backward()
print(l)
print(m)
print(x.grad)
print(y.grad)
import math
import torch
import numpy as np
from template import Target
from lattice import Hypercube
class TrivialS1D(Target):
def __init__(self):
super(TrivialS1D,self).__init__(1.0,'TrivialS1D')
def logprob(self,x):
return x*0
class twopeak(Target):
def __init__(self):
super(twopeak,self).__init__(1.0,'twopeak')
def logprob(self,x):
return torch.log(x.pow(2)-x.pow(4)+0.1)
class twopeakplus(Target):
def __init__(self):
super(twopeakplus,self).__init__(1.0,'twopeakplus')
def logprob(self,x):
return torch.where(x>0,torch.log(torch.sin(x*math.pi)+0.1),torch.log(torch.sin(-x*math.pi)+0.1))
class xy1Dchain(Target):
def __init__(self):
super(xy1Dchain,self).__init__(1.0,'xy1Dchain')
def logprob(self,x):
return x*0
class xyS1ring(Target):
def __init__(self,npoint,beta=1.0):
super(xyS1ring,self).__init__(1.0,'xyS1ring')
self.lattice = Hypercube(npoint, 1, 'periodic')
self.K = self.lattice.deAdj  # nearest-neighbour difference matrix
self.beta=beta
def logprob(self,x):
return self.beta*torch.cos(math.pi*x.mm(torch.Tensor(self.K).t())).sum(1)-27.7379
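# xyS1ring is the classical XY model on a periodic ring of npoint sites: with angles
# theta_i = pi * x_i, logprob is beta * sum_i cos(theta_i - theta_{i+1}) plus a constant
# offset (-27.7379), and deAdj supplies the nearest-neighbour differences x_i - x_{i+1}.
# The constant only shifts logprob; it does not affect gradients of the training loss.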
class Trivial2D(Target):
def __init__(self,dim):
super(Trivial2D,self).__init__(1.0,'TrivialS2D')
self.dim=dim
def logprob(self,x):
return x*0-math.log(2)*self.dim
if __name__ == '__main__':
target=xyS1ring(2)
x=torch.Tensor([[1,0.5],[0.1,0.5]])
print(target.logprob(x))
import torch
from torch.autograd import Variable
from torch import nn
import numpy as np
class Target(nn.Module):
'''
base class for target
'''
def __init__(self,nvars,name = "Target", beta=1.0):
super(Target, self).__init__()
self.nvars = nvars
self.name = name
self.beta = beta
def __call__(self, x):
return self.logprob(x)
def logprob(self,z):
raise NotImplementedError(str(type(self)))
def backward(self,z):
z = Variable(z.data,requires_grad=True)
out = self.logprob(z)
batchSize = z.size()[0]
if isinstance(z.data,torch.DoubleTensor):
out.backward(torch.ones(batchSize).double())
else:
out.backward(torch.ones(batchSize))
return Variable(z.grad.data,requires_grad = True)
def measure(self, x):
return (x.data**2).sum(dim=1).cpu().numpy()
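# A minimal usage sketch (not part of the original file); the class name GaussianTarget
# and its sigma parameter are illustrative. A concrete target only needs logprob;
# __call__ above dispatches to it.
class GaussianTarget(Target):
    def __init__(self, nvars, sigma=1.0):
        super(GaussianTarget, self).__init__(nvars, 'GaussianTarget')
        self.sigma = sigma
    def logprob(self, x):
        # unnormalized isotropic Gaussian log-density, one value per sample in the batch
        return -(x**2).sum(dim=1) / (2 * self.sigma**2)
# Example: GaussianTarget(2)(torch.randn(5, 2)) returns a length-5 tensor of log-probabilities.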
note @ 8ca51c0d
Subproject commit 8ca51c0d80ca3aaf86da397d22e3fe1b45f683c2