Advanced Computing Platform for Theoretical Physics

Commits (2)
<<<<<<< HEAD
#
#
# class Config(object):
#     num_layers = 2                     # number of LSTM layers
#     data_path = 'data/'                # directory holding the poem text files
#     pickle_path = 'tang.npz'           # preprocessed binary file
#     author = None                      # only learn poems by a single author
#     constrain = None                   # length constraint
#     category = 'poet.tang'             # corpus category: Tang poems, or Song poems (poet.song)
#     lr = 5e-4
#     weight_decay = 5e-4
#     use_gpu = True
#     epoch = 50
#     batch_size = 64
#     maxlen = 125                       # longer sequences are truncated; shorter ones are left-padded with spaces
#     plot_every = 200                   # visualize once every 200 batches
#     # use_env = True                   # whether to use visdom
#     env = 'poetry'                     # visdom env
#     max_gen_len = 200                  # maximum length of a generated poem
#     debug_file = '/tmp/debugp'
#     model_path = "./checkpoints/tang_36.pth"  # path to a pretrained model
#     prefix_words = '仙路尽头谁为峰?一见无始道成空。'  # not part of the poem itself; used to steer the mood of the generated poem
#     start_words = '闲云潭影日悠悠'      # opening words of the poem
#     acrostic = False                   # whether to generate an acrostic poem
#     model_prefix = 'checkpoints/tang'  # checkpoint save prefix
#     embedding_dim = 512
#     hidden_dim = 1024
#     mpo = False
import argparse
import torch
import os

parser = argparse.ArgumentParser()
group = parser.add_argument_group('parameters')
group.add_argument('--net', type=str, default='mpo', choices=['mpo', 'mpo2'], help='type of the factorized output layer')
group.add_argument('--num_layers', type=int, default=2, help='number of RNN layers')
group.add_argument('--data_path', type=str, default='data/', help='directory holding the poem data')
group.add_argument('--pickle_path', type=str, default='tang.npz', help='preprocessed binary file')
group.add_argument('--category', type=str, default='poet.tang', help='poem category (poet.tang or poet.song)')
group.add_argument('--lr', type=float, default=5e-4, help='learning rate')
group.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay')
group.add_argument('--ingpu', action='store_false', help='keep everything on the GPU (default True; passing this flag disables it)')
group.add_argument('--cuda', type=int, default=0, help='ID of the GPU to use, -1 to disable')
group.add_argument('--epoch', type=int, default=50, help='number of training epochs')
group.add_argument('--batch_size', type=int, default=64, help='samples per batch')
group.add_argument('--vocab_size', type=int, default=8400, help='vocabulary size')
group.add_argument('--maxlen', type=int, default=125, help='maximum number of words in one sample')
group.add_argument('--plot_every', type=int, default=200, help='visualize every N batches')
group.add_argument('--env', type=str, default='poetry', help='visdom env')
group.add_argument('--max_gen_len', type=int, default=200, help='maximum generation length of a poem')
group.add_argument('--debug_file', type=str, default='/tmp/debugp', help='debug file')
group.add_argument('--model_path', type=str, default="./checkpoints/tang_49.pth", help='path to a pretrained model')
group.add_argument('--prefix_words', type=str, default='仙路尽头谁为峰?一见无始道成空。', help='style prefix; not part of the generated poem')
group.add_argument('--start_words', type=str, default='闲云潭影日悠悠', help='opening words of the poem')
group.add_argument('--acrostic', action='store_true', help='generate an acrostic poem')
group.add_argument('--model_prefix', type=str, default='checkpoints/tang', help='checkpoint save prefix')
group.add_argument('--embedding_dim', type=int, default=512, help='embedding dimension')
group.add_argument('--hidden_dim', type=int, default=1024, help='hidden dimension')
group.add_argument('--mpo', action='store_true', help='use the MPO-factorized output layer')
group.add_argument('--chi', type=int, default=4, help='bond dimension (rank) of the low-rank/MPO representation')
group.add_argument('--dims', type=str, default="8,4,4,8;12,10,7,10", help='semicolon-separated factorizations of hidden_dim and vocab_size')
group.add_argument('--bias', action='store_true', help='use bias')
group.add_argument('--dtype', type=str, default='float32', choices=['float32', 'float64'], help='dtype')
group.add_argument('--cpu', action='store_true', help='force CPU execution')
# referenced by utils.mylog but previously never defined:
group.add_argument('--log', type=str, default='', help='append log output to this file')
group.add_argument('--no_stdout', action='store_true', help='suppress stdout when logging to a file')
args = parser.parse_args()
if args.cuda >= 0:
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda)
args.use_cuda=True
args.device = torch.device('cuda:0')
else:
args.use_cuda=False
args.device = torch.device('cpu')
if args.dtype == 'float32':
args.dtype = torch.float32
elif args.dtype == 'float64':
args.dtype = torch.float64
else:
raise ValueError('Unknown dtype: {}'.format(args.dtype))
if args.cpu:
args.use_cuda=False
args.device = torch.device('cpu')
#print("args.device=",args.device)
=======
class Config(object):
@@ -26,3 +119,4 @@ class Config(object):
embedding_dim = 512
hidden_dim = 1024
mpo=False
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
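Taken together, the argparse defaults encode the MPO factorization. A quick standalone check (illustrative, not part of either branch) that the --dims default factors the layer sizes, mirroring the asserts in model.py below:

# Hypothetical check of the --dims default: 8*4*4*8 must equal hidden_dim
# and 12*10*7*10 must equal vocab_size.
import numpy as np

dims = [[int(d) for d in part.split(',')] for part in "8,4,4,8;12,10,7,10".split(';')]
assert np.prod(dims[0]) == 1024   # hidden_dim
assert np.prod(dims[1]) == 8400   # vocab_size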
@@ -6,7 +6,11 @@ from torch import nn
from model import *
from torchnet import meter
import tqdm
<<<<<<< HEAD
from config import args
=======
from config import *
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
from test import *
@@ -16,7 +20,11 @@ def generate(model, start_words, ix2word, word2ix, prefix_words=None):
start_words_len = len(start_words)
# the first token is <START>
input = t.Tensor([word2ix['<START>']]).view(1, 1).long()
<<<<<<< HEAD
if args.ingpu:
=======
if Config.use_gpu:
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
input = input.cuda()
hidden = None
@@ -30,7 +38,11 @@ def generate(model, start_words, ix2word, word2ix, prefix_words=None):
# Start generating the poem proper. With no style prefix, hidden = None and input = <START>;
# otherwise input is the last word of the style prefix and hidden is the state it produced.
<<<<<<< HEAD
for i in range(args.max_gen_len):
=======
for i in range(Config.max_gen_len):
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
output, hidden = model(input, hidden)
# print(output.shape)
# While still inside the given verse, the input is the verse's own character; the output is not used, only to obtain
@@ -56,7 +68,11 @@ def gen_acrostic(model, start_words, ix2word, word2ix, prefix_words=None):
result = []
start_words_len = len(start_words)
input = (t.Tensor([word2ix['<START>']]).view(1, 1).long())
<<<<<<< HEAD
if args.ingpu:
=======
if Config.use_gpu:
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
input = input.cuda()
# tracks how many lines of the acrostic have been generated so far
index = 0
@@ -70,7 +86,11 @@ def gen_acrostic(model, start_words, ix2word, word2ix, prefix_words=None):
input = (input.data.new([word2ix[word]])).view(1, 1)
# start generating the verse
<<<<<<< HEAD
for i in range(args.max_gen_len):
=======
for i in range(Config.max_gen_len):
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
output, hidden = model(input, hidden)
top_index = output.data[0].topk(1)[1][0].item()
w = ix2word[top_index]
......
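Both generate() and gen_acrostic() repeat the same greedy step. A condensed sketch (illustrative; it assumes model(input, hidden) returns logits of shape [1, vocab_size] plus the new hidden state, as the code above does):

# One greedy step, mirroring the topk(1) pattern in gen_acrostic above.
def greedy_step(model, input, hidden, ix2word):
    output, hidden = model(input, hidden)
    top_index = output.data[0].topk(1)[1][0].item()   # most likely next token id
    w = ix2word[top_index]                            # decode it to a character
    input = input.data.new([top_index]).view(1, 1)    # feed the prediction back in
    return w, input, hidden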
@@ -6,17 +6,29 @@ from torch import nn
from model import *
from torchnet import meter
import tqdm
<<<<<<< HEAD
from config import args
=======
from config import *
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
from test import *
import sys
import time
def train():
<<<<<<< HEAD
if args.ingpu:
args.device = t.device("cuda")
else:
args.device = t.device("cpu")
device = args.device
=======
if Config.use_gpu:
Config.device = t.device("cuda")
else:
Config.device = t.device("cpu")
device = Config.device
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
datas = np.load("tang.npz",allow_pickle=True)
print(datas)
data = datas['data']
@@ -31,15 +43,27 @@ def train():
print(ix2word[data[1][0]])
data = t.from_numpy(data)
dataloader = DataLoader(data,
<<<<<<< HEAD
batch_size=args.batch_size,
=======
batch_size=Config.batch_size,
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
shuffle=True,
num_workers=2)
<<<<<<< HEAD
# model = PoetryModel(args, vocab_size=8400,
# embedding_dim=args.embedding_dim,
# hidden_dim = args.hidden_dim,mpo=args.mpo)
model = PoetryModel(args)
Configimizer = optim.Adam(model.parameters(),lr=args.lr)
=======
model = PoetryModel(8400,
embedding_dim=Config.embedding_dim,
hidden_dim = Config.hidden_dim,mpo=Config.mpo)
Configimizer = optim.Adam(model.parameters(),lr=Config.lr)
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
criterion = nn.CrossEntropyLoss()
#if Config.model_path:
#model.load_state_dict(t.load(Config.model_path,map_location='cpu'))
@@ -48,7 +72,11 @@ def train():
loss_meter = meter.AverageValueMeter()
f = open('result.txt','w')
#sys.exit(0)
<<<<<<< HEAD
for epoch in range(args.epoch):
=======
for epoch in range(Config.epoch):
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
time0=time.time()
loss_meter.reset()
#for li,data_ in tqdm.tqdm(enumerate(dataloader)):
@@ -72,7 +100,11 @@ def train():
Configimizer.step()
loss_meter.add(loss.item())
# visualization
<<<<<<< HEAD
if (1+li)%args.plot_every == 0:
=======
if (1+li)%Config.plot_every == 0:
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
print("训练损失为%s"%(str(loss_meter.mean)))
f.write("训练损失为%s"%(str(loss_meter.mean)))
for word in list(u"春江花月夜"):
@@ -81,7 +113,11 @@ def train():
f.write(gen_poetry)
f.write("\n\n\n")
f.flush()
<<<<<<< HEAD
t.save(model.state_dict(),'%s_%s_%s.pth'%(args.model_prefix,args.mpo,epoch))
=======
t.save(model.state_dict(),'%s_%s_%s.pth'%(Config.model_prefix,Config.mpo,epoch))
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
print('used time: ', time.time()-time0)
if __name__ == '__main__':
......
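The hunk containing the actual loss computation is collapsed. Below is a standalone sketch of the presumed next-token objective, with stand-in tensors at the default sizes (maxlen=125, batch_size=64, vocab_size=8400); this is an assumption, not code from the commit:

# Illustrative only: predict token t+1 from tokens up to t with CrossEntropyLoss.
import torch as t
from torch import nn

seq_len, batch, vocab = 125, 64, 8400
data_ = t.randint(vocab, (batch, seq_len)).transpose(1, 0).contiguous()  # [seq_len, batch]
input_, target = data_[:-1, :], data_[1:, :]                             # shift by one step
logits = t.randn((seq_len - 1) * batch, vocab, requires_grad=True)       # stand-in for model(input_)
loss = nn.CrossEntropyLoss()(logits, target.reshape(-1))
loss.backward()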
from main import *
<<<<<<< HEAD
from config import args
=======
from config import *
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch
import torch.nn.functional as F
import math
<<<<<<< HEAD
import mpo
class Linear(nn.Module):
def __init__(self, in_feat, out_feat,bias=True):
=======
from mpo import MPO
class Linear(nn.Module):
def __init__(self,in_feat, out_feat,bias=True):
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
super(Linear,self).__init__()
self.in_feat=in_feat
self.out_feat=out_feat
@@ -25,6 +35,28 @@ class Linear(nn.Module):
return F.linear(input, self.weight, self.bias)
class PoetryModel(nn.Module):
<<<<<<< HEAD
# def __init__(self, args, vocab_size, embedding_dim, hidden_dim=1024, mpo=True):
def __init__(self, args):
super(PoetryModel, self).__init__()
self.args = args
self.hidden_dim = args.hidden_dim
self.embeddings = nn.Embedding(args.vocab_size, args.embedding_dim)
self.gru = nn.GRU(args.embedding_dim, self.hidden_dim, num_layers=args.num_layers)
self.bond_dim = args.chi
self.dims = []
for dim in args.dims.split(';'):
self.dims.append([int(i) for i in dim.split(",")])
assert (np.prod(self.dims[0]) == self.hidden_dim)
assert (np.prod(self.dims[-1]) == args.vocab_size)
if args.mpo and args.vocab_size == 8400:
if args.net == 'mpo':
self.linear = mpo.MPO(self.hidden_dim, args.vocab_size, self.dims[0], self.dims[1], self.bond_dim)
elif args.net == 'mpo2':
self.linear = mpo.MPO2(self.dims[0], self.dims[1], chi=args.chi)
elif not args.mpo:
self.linear = Linear(self.hidden_dim, args.vocab_size)
=======
def __init__(self, vocab_size, embedding_dim, hidden_dim=1024, mpo=True):
super(PoetryModel, self).__init__()
self.hidden_dim = hidden_dim
@@ -35,12 +67,17 @@ class PoetryModel(nn.Module):
self.linear = MPO(self.hidden_dim, vocab_size, [8,4,4,8],[12,10,7,10],self.bond_dim)
else:
self.linear = Linear(self.hidden_dim, vocab_size)
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
def forward(self, input, hidden=None):
seq_len, batch_size = input.size()
#print(input.shape)
if hidden is None:
<<<<<<< HEAD
h_0 = input.data.new(args.num_layers, batch_size, self.hidden_dim).fill_(0).float()
=======
h_0 = input.data.new(Config.num_layers, batch_size, self.hidden_dim).fill_(0).float()
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
#c_0 = input.data.new(Config.num_layers, batch_size, self.hidden_dim).fill_(0).float()
else:
#h_0, c_0 = hidden
......
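For reference, the shape flow through PoetryModel.forward under the default sizes (embedding 512, GRU hidden 1024, 2 layers). A runnable sketch with stand-in modules, not the model class itself:

# Shapes through the model: tokens -> embeddings -> GRU -> per-token logits.
import torch

seq_len, batch, vocab, emb, hid, layers = 7, 2, 8400, 512, 1024, 2
x = torch.randint(vocab, (seq_len, batch))
embeds = torch.nn.Embedding(vocab, emb)(x)              # [7, 2, 512]
h_0 = x.data.new(layers, batch, hid).fill_(0).float()   # matches the hidden init above
output, hidden = torch.nn.GRU(emb, hid, num_layers=layers)(embeds, h_0)
logits = torch.nn.Linear(hid, vocab)(output.view(seq_len * batch, -1))  # [14, 8400]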
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch
import torch.nn.functional as F
import math
class MPO(nn.Module):
# factorizes the 1024 -> 8400 linear map: 1024 = 8*4*4*8 (array_in), 8400 = 12*10*7*10 (array_out)
def __init__(self, in_feat, out_feat, array_in, array_out, bond_dim, bias=True):
    super(MPO, self).__init__()
    self.array_in = array_in
    self.array_out = array_out
    self.bond_dim = bond_dim
    self.in_feat = in_feat
    self.out_feat = out_feat
    self.define_parameters()
    if bias:
        self.bias = Parameter(torch.Tensor(out_feat))
    else:
        # without this, forward() would raise AttributeError when bias=False
        self.register_parameter('bias', None)
    self.reset_parameters()
def define_parameters(self):
self.weight=torch.nn.ParameterList([])
for i in range(len(self.array_in)):
if i==0:
self.weight.append(Parameter(torch.Tensor(self.array_in[0],self.array_out[0],self.bond_dim)))
elif i==len(self.array_in)-1:
self.weight.append(Parameter(torch.Tensor(self.array_in[i],self.bond_dim,self.array_out[i])))
else:
self.weight.append(Parameter(torch.Tensor(self.array_in[i],self.bond_dim,self.array_out[i],self.bond_dim)))
def reset_parameters(self):
if self.bias is not None:
fan_out=self.out_feat
bound = 1 / math.sqrt(fan_out)
torch.nn.init.uniform_(self.bias, -bound, bound)
gain=1.0
std = gain * math.sqrt(2.0 / float(self.in_feat + self.out_feat))
a = math.sqrt(3.0) * std
for i in self.weight:
a=math.sqrt(a*math.sqrt(3.0/self.bond_dim))
torch.nn.init.uniform_(i,-a,a)
def forward(self,input):
shape=self.array_in.copy()
shape.insert(0,input.shape[0])
output=input.reshape(shape)
'''
output=torch.einsum('abcde,bmf->acdefm',output,self.weight[0])
output=torch.einsum('acdefm,cfng->adegmn',output,self.weight[1])
output=torch.einsum('adegmn,dgph->aehmnp',output,self.weight[2])
output=torch.einsum('aehmnp,ehq->amnpq',output,self.weight[3]).reshape(-1,self.out_feat)
'''
for i in range(len(self.weight)):
if i==0:
output = torch.einsum('abcde,bmf->acfdem',output,self.weight[i])
elif i==len(self.weight)-1:
output = torch.einsum('abcdef,bcg->adefg',output,self.weight[i]).reshape(-1,self.out_feat)
else:
output = torch.einsum('abcdef,bcgh->adhefg',output,self.weight[i])
if self.bias is not None:
output+=self.bias
# TODO: derive the contraction order automatically instead of hard-coding the einsum strings
return output
\ No newline at end of file
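To make the einsum chain concrete: with the factorization used in model.py (array_in=[8,4,4,8], array_out=[12,10,7,10], bond_dim=4), the four cores hold 384 + 640 + 448 + 320 = 1792 weights in place of the dense layer's 1024 * 8400 + 8400 = 8,611,600. A usage sketch, assuming the MPO class above is importable from mpo.py:

# Illustrative check: the MPO layer maps [batch, 1024] -> [batch, 8400]
# with only the four small cores (plus the 8400-dim bias) as parameters.
import torch
from mpo import MPO  # the class defined above

layer = MPO(1024, 8400, [8, 4, 4, 8], [12, 10, 7, 10], bond_dim=4)
x = torch.randn(3, 1024)
print(layer(x).shape)                        # torch.Size([3, 8400])
print(sum(p.numel() for p in layer.weight))  # 1792 core weights
print(1024 * 8400 + 8400)                    # 8611600 for the dense layer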
<<<<<<< HEAD
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch
import numpy as np
import torch.nn.functional as F
import math
from config import args
from utils import AttrProxy
class MPO(nn.Module):
# factorizes the 1024 -> 8400 linear map: 1024 = 8*4*4*8 (array_in), 8400 = 12*10*7*10 (array_out)
def __init__(self, in_feat, out_feat, array_in, array_out, bond_dim, bias=True):
    super(MPO, self).__init__()
    self.array_in = array_in
    self.array_out = array_out
    self.bond_dim = bond_dim
    self.in_feat = in_feat
    self.out_feat = out_feat
    self.define_parameters()
    if bias:
        self.bias = Parameter(torch.Tensor(out_feat))
    else:
        # without this, forward() would raise AttributeError when bias=False
        self.register_parameter('bias', None)
    self.reset_parameters()
def define_parameters(self):
self.weight=torch.nn.ParameterList([])
for i in range(len(self.array_in)):
if i==0:
self.weight.append(Parameter(torch.Tensor(self.array_in[0],self.array_out[0],self.bond_dim)))
elif i==len(self.array_in)-1:
self.weight.append(Parameter(torch.Tensor(self.array_in[i],self.bond_dim,self.array_out[i])))
else:
self.weight.append(Parameter(torch.Tensor(self.array_in[i],self.bond_dim,self.array_out[i],self.bond_dim)))
def reset_parameters(self):
if self.bias is not None:
fan_out=self.out_feat
bound = 1 / math.sqrt(fan_out)
torch.nn.init.uniform_(self.bias, -bound, bound)
gain=1.0
std = gain * math.sqrt(2.0 / float(self.in_feat + self.out_feat))
a = math.sqrt(3.0) * std
for i in self.weight:
# print(i.shape)
a=math.sqrt(a*math.sqrt(3.0/self.bond_dim))
torch.nn.init.uniform_(i,-a,a)
def forward(self,input):
shape=self.array_in.copy()
shape.insert(0,input.shape[0])
output=input.reshape(shape)
'''
output=torch.einsum('abcde,bmf->acdefm',output,self.weight[0])
output=torch.einsum('acdefm,cfng->adegmn',output,self.weight[1])
output=torch.einsum('adegmn,dgph->aehmnp',output,self.weight[2])
output=torch.einsum('aehmnp,ehq->amnpq',output,self.weight[3]).reshape(-1,self.out_feat)
'''
for i in range(len(self.weight)):
if i==0:
output = torch.einsum('abcde,bmf->acfdem',output,self.weight[i])
elif i==len(self.weight)-1:
output = torch.einsum('abcdef,bcg->adefg',output,self.weight[i]).reshape(-1,self.out_feat)
else:
output = torch.einsum('abcdef,bcgh->adhefg',output,self.weight[i])
if self.bias is not None:
output+=self.bias
# TODO: derive the contraction order automatically instead of hard-coding the einsum strings
return output
class MPO2(nn.Module):
def __init__(self, Din, Dout, bias=False, chi=2, seed=-1):
"""
Din (and Dout) should be a tuple containing all input (output) dimensions
"""
super(MPO2, self).__init__()
self.Din = Din
self.Dout = Dout
self.bondim = [chi for i in Din]
self.bondim[-1] = 1
print("Din=", Din, "Dout=", Dout)
assert (len(self.Din) == len(self.Dout))
self.tensors = []
self.npin = np.prod(self.Din)
self.npout = np.prod(self.Dout)
if seed > 0:
torch.manual_seed(seed)
for i, din in enumerate(self.Din):
    dout = self.Dout[i]
    a = torch.rand(self.bondim[i - 1], self.bondim[i], din, dout) / math.sqrt(self.npout)
    # setattr registers each core as tensors_0, tensors_1, ... (clearer than exec)
    setattr(self, 'tensors_' + str(i), Parameter(a.clone()))
if bias:
self.bias = Parameter(torch.zeros([self.npout, 1]))
else:
self.register_parameter('bias', None)
self.tensors = AttrProxy(self, 'tensors_')
print(self)
print("Parameters in the class")
params = list(self.parameters())
params = list(filter(lambda p: p.requires_grad, params))
nparams = int(sum([np.prod(p.shape) for p in params]))
print('Total number of trainable parameters: {}'.format(nparams))
for param in self.parameters():
print(type(param.data), param.size())
self.reset_parameters()
def reset_parameters(self):
self.in_feat = args.hidden_dim    # note: init scales read the global config args, not the constructor arguments
self.out_feat = args.vocab_size
self.bond_dim = args.chi
if self.bias is not None:
fan_out = self.out_feat
bound = 1 / math.sqrt(fan_out)
torch.nn.init.uniform_(self.bias, -bound, bound)
gain = 1.0
std = gain * math.sqrt(2.0 / float(self.in_feat + self.out_feat))
a = math.sqrt(3.0) * std
tensors = []
for j in range(len(self.Din)):
tensors.append(self.tensors[j])
for i in tensors:
# print('i', i.shape)
a = math.sqrt(a * math.sqrt(3.0 / self.bond_dim))
torch.nn.init.uniform_(i, -a, a)
def forward(self, input):
input = input.reshape(input.shape[0],1,1,self.Din[0],-1)
for i in range(len(self.Din)):
input = torch.einsum("bijkl,jakm->bimal",input,self.tensors[i])
Dnext = self.Din[i+1] if i<len(self.Din)-1 else 1
newshape=[input.shape[0],input.shape[1]*input.shape[2],input.shape[3],Dnext,-1]
input = input.contiguous().view(newshape)
return input.contiguous().view(input.shape[0],-1)
=======
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch
import torch.nn.functional as F
import math
class MPO(nn.Module):
# factorizes the 1024 -> 8400 linear map: 1024 = 8*4*4*8 (array_in), 8400 = 12*10*7*10 (array_out)
def __init__(self, in_feat, out_feat, array_in, array_out, bond_dim, bias=True):
    super(MPO, self).__init__()
    self.array_in = array_in
    self.array_out = array_out
    self.bond_dim = bond_dim
    self.in_feat = in_feat
    self.out_feat = out_feat
    self.define_parameters()
    if bias:
        self.bias = Parameter(torch.Tensor(out_feat))
    else:
        # without this, forward() would raise AttributeError when bias=False
        self.register_parameter('bias', None)
    self.reset_parameters()
def define_parameters(self):
self.weight=torch.nn.ParameterList([])
for i in range(len(self.array_in)):
if i==0:
self.weight.append(Parameter(torch.Tensor(self.array_in[0],self.array_out[0],self.bond_dim)))
elif i==len(self.array_in)-1:
self.weight.append(Parameter(torch.Tensor(self.array_in[i],self.bond_dim,self.array_out[i])))
else:
self.weight.append(Parameter(torch.Tensor(self.array_in[i],self.bond_dim,self.array_out[i],self.bond_dim)))
def reset_parameters(self):
if self.bias is not None:
fan_out=self.out_feat
bound = 1 / math.sqrt(fan_out)
torch.nn.init.uniform_(self.bias, -bound, bound)
gain=1.0
std = gain * math.sqrt(2.0 / float(self.in_feat + self.out_feat))
a = math.sqrt(3.0) * std
for i in self.weight:
a=math.sqrt(a*math.sqrt(3.0/self.bond_dim))
torch.nn.init.uniform_(i,-a,a)
def forward(self,input):
shape=self.array_in.copy()
shape.insert(0,input.shape[0])
output=input.reshape(shape)
'''
output=torch.einsum('abcde,bmf->acdefm',output,self.weight[0])
output=torch.einsum('acdefm,cfng->adegmn',output,self.weight[1])
output=torch.einsum('adegmn,dgph->aehmnp',output,self.weight[2])
output=torch.einsum('aehmnp,ehq->amnpq',output,self.weight[3]).reshape(-1,self.out_feat)
'''
for i in range(len(self.weight)):
if i==0:
output = torch.einsum('abcde,bmf->acfdem',output,self.weight[i])
elif i==len(self.weight)-1:
output = torch.einsum('abcdef,bcg->adefg',output,self.weight[i]).reshape(-1,self.out_feat)
else:
output = torch.einsum('abcdef,bcgh->adhefg',output,self.weight[i])
if self.bias is not None:
output+=self.bias
# TODO: derive the contraction order automatically instead of hard-coding the einsum strings
return output
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
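Both conflicting versions of mpo.py share the same core layout; MPO2 differs in contracting site by site, with the wrap-around bond bondim[-1] = 1 closing both ends of the train. A quick arithmetic check (chi=4 and the default factorizations assumed) that MPO2 carries the same parameter budget as MPO:

# MPO2 core shapes are (bond[i-1], bond[i], Din[i], Dout[i]); bond[-1] = 1
# closes both ends of the train.
import numpy as np

chi, Din, Dout = 4, [8, 4, 4, 8], [12, 10, 7, 10]
bondim = [chi] * len(Din)
bondim[-1] = 1
cores = [(bondim[i - 1], bondim[i], din, Dout[i]) for i, din in enumerate(Din)]
print([int(np.prod(c)) for c in cores])     # [384, 640, 448, 320]
print(sum(int(np.prod(c)) for c in cores))  # 1792, same as MPO above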
from main import *
from model import *
<<<<<<< HEAD
from config import args
=======
from config import *
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
import torch as t
from generate import *
import time
@@ -11,9 +15,16 @@ def userTest():
data = datas['data']
ix2word = datas['ix2word'].item()
word2ix = datas['word2ix'].item()
<<<<<<< HEAD
# model = PoetryModel(args, 8400, args.embedding_dim, args.hidden_dim,mpo=args.mpo)
model = PoetryModel(args)
model.load_state_dict(t.load(args.model_path, 'cpu'))
if args.ingpu:
=======
model = PoetryModel(8400, Config.embedding_dim, Config.hidden_dim,mpo=Config.mpo)
model.load_state_dict(t.load(Config.model_path, 'cpu'))
if Config.use_gpu:
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
model.to(t.device('cuda'))
print("初始化完成!\n")
while True:
@@ -21,7 +32,11 @@ def userTest():
"输入1 进入首句生成模式\n"
"输入2 进入藏头诗生成模式\n")
mode = int(input())
<<<<<<< HEAD
if mode == 1:
=======
if mode == 1:
>>>>>>> e443d794910976efa32a5d2bb653992e4de650da
print("请输入您想要的诗歌首句,可以是五言或七言")
start_words = str(input())
time0=time.time()
......
import glob
import io
import os
import numpy as np
import torch
from torch import nn
from config import args
def mylog(s):
if args.log:
with open(args.log, 'a', newline='\n') as f:
f.write(s + u'\n')
if not args.no_stdout:
print(s)
else:
print(s)
class AttrProxy(object):
"""Translates index lookups into attribute lookups."""
def __init__(self, module, prefix):
self.module = module
self.prefix = prefix
def __getitem__(self, i):
return getattr(self.module, self.prefix + str(i))
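A short usage sketch of AttrProxy (illustrative): MPO2 registers each core as a separate attribute tensors_0, tensors_1, ..., so PyTorch tracks them as parameters, and the proxy restores list-style indexing:

# Toy module demonstrating the AttrProxy pattern used by MPO2.
import torch
import torch.nn as nn

class Toy(nn.Module):
    def __init__(self):
        super(Toy, self).__init__()
        for i in range(3):
            # setattr on an nn.Module registers each Parameter individually
            setattr(self, 'tensors_' + str(i), nn.Parameter(torch.zeros(i + 1)))
        self.tensors = AttrProxy(self, 'tensors_')  # index lookups -> attribute lookups

toy = Toy()
print(toy.tensors[2].shape)          # torch.Size([3])
print(len(list(toy.parameters())))   # 3: all cores are tracked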