Commit e62faed2 authored by Stevezhangz

Update README.md

Parent abcb87ad
@@ -29,33 +29,32 @@ except that, you have to learn about how to run it on your dataset
for example:
np.random.seed(random_seed)
#json2list=general_transform_text2list("data/demo.txt",type="txt")
json2list=general_transform_text2list("data/chinese-poetry/chuci/chuci.json",type="json",args=['content'])
data=json2list.getdata()
list2token=generate_vocab_normalway(data,map_dir="words_info.json")
sentences,token_list,idx2word,word2idx,vocab_size=list2token.transform()
-batch = creat_batch(batch_size,max_pred,maxlen,vocab_size,word2idx,token_list,sentences)
-input_ids, segment_ids, masked_tokens, masked_pos, isNext = zip(*batch)
-input_ids, segment_ids, masked_tokens, masked_pos, isNext = \
-    torch.LongTensor(input_ids), torch.LongTensor(segment_ids), torch.LongTensor(masked_tokens), \
-    torch.LongTensor(masked_pos), torch.LongTensor(isNext)
-loader = Data.DataLoader(Text_file(input_ids, segment_ids, masked_tokens, masked_pos, isNext), batch_size, True)
+batch = creat_batch(batch_size,max_pred,maxlen,word2idx,idx2word,token_list,0.15)
+loader = Data.DataLoader(Text_file(batch), batch_size, True)
model=Bert(n_layers=n_layers,
-           vocab_size=vocab_size,
-           emb_size=d_model,
-           max_len=maxlen,
-           seg_size=n_segments,
-           dff=d_ff,
-           dk=d_k,
-           dv=d_v,
-           n_head=n_heads,
-           n_class=2,
-           )
+           vocab_size=vocab_size,
+           emb_size=d_model,
+           max_len=maxlen,
+           seg_size=n_segments,
+           dff=d_ff,
+           dk=d_k,
+           dv=d_v,
+           n_head=n_heads,
+           n_class=2,
+           drop=drop)
if use_gpu:
with torch.cuda.device(device) as device:
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adadelta(model.parameters(), lr=lr)
-model.Train(epoches=epoches,
+model.Train_for_mask_guess(epoches=epoches,
train_data_loader=loader,
optimizer=optimizer,
criterion=criterion,
......
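
Note: the snippet above, in both its old and updated form, assumes that the imports and hyperparameters are defined earlier in the script. For readers who want to try the updated call signature, here is a minimal sketch of those definitions. The values are illustrative assumptions, not the repository's defaults, and the import line for the repo's own helpers (general_transform_text2list, generate_vocab_normalway, creat_batch, Text_file, Bert) is hypothetical; check the actual module layout.

# Illustrative setup only -- values and the commented import path are assumptions.
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
# from bert_codes import general_transform_text2list, generate_vocab_normalway, \
#     creat_batch, Text_file, Bert   # hypothetical module name

random_seed = 2021   # seed passed to np.random.seed above
batch_size  = 32
max_pred    = 5      # max number of masked positions predicted per sample
maxlen      = 64     # max sequence length
n_layers    = 6      # number of encoder layers
n_heads     = 8      # attention heads per layer
d_model     = 512    # embedding size
d_ff        = 2048   # feed-forward hidden size
d_k         = 64     # per-head key/query dimension
d_v         = 64     # per-head value dimension
n_segments  = 2      # segment (sentence A/B) vocabulary size
drop        = 0.1    # dropout rate, the new drop= argument to Bert
lr          = 1e-3   # learning rate for optim.Adadelta
epoches     = 100    # passed to Train_for_mask_guess
use_gpu     = torch.cuda.is_available()
device      = 0      # CUDA device index used by torch.cuda.device(device)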