Commit ae15986 by cheesexuebao
Parent(s): 0198505
Adding Blob
Files changed:
- app.py (+64 -4)
- bert-base-uncased/bert_config.json (+13 -0)
- bert-base-uncased/config.json (+23 -0)
- bert-base-uncased/pytorch_model.bin (+3 -0)
- bert-base-uncased/vocab.txt (+0 -0)
- requirments.txt (+3 -0)
app.py
CHANGED
@@ -1,7 +1,67 @@
 import gradio as gr
+import torch
+from transformers import BertModel
+from transformers import BertTokenizer
+import torch.nn as nn
 
-def greet(name):
-    return "Hello " + name + "!!"
-
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
-demo.launch()
+class BertSST2Model(nn.Module):
+
+    # Initialize the class
+    def __init__(self, class_size, pretrained_name='bert-base-chinese'):
+        """
+        Args:
+            class_size: number of target classes; sets the output dimension of the linear classifier
+            pretrained_name: which pretrained BERT model to load
+        """
+        # Standard superclass initialization
+        super(BertSST2Model, self).__init__()
+        # Load the HuggingFace BertModel
+        # BertModel's final output dimension defaults to 768
+        # return_dict=True makes the output dict-like, e.g. bert_output['last_hidden_state']
+        self.bert = BertModel.from_pretrained(pretrained_name,
+                                              return_dict=True)
+        # A linear layer maps the [CLS] representation from 768 -> class_size
+        # class_size is 2 for the SST-2 sentiment classification task
+        self.classifier = nn.Linear(768, class_size)
+
+    def forward(self, inputs):
+        # Unpack the inputs already prepared by the DataLoader:
+        # input_ids: tensor, shape = batch_size * max_len, where max_len is the longest sentence in the batch
+        # input_tyi: tensor of token type ids
+        # input_attn_mask: tensor; input_ids contains many [PAD] tokens, so the
+        # attention mask zeroes out the padded positions and the model attends only to real tokens
+        input_ids, input_tyi, input_attn_mask = inputs['input_ids'], inputs[
+            'token_type_ids'], inputs['attention_mask']
+        # Feed the three tensors into BERT, passing by keyword: BertModel.forward
+        # takes attention_mask before token_type_ids, so positional passing would swap them
+        output = self.bert(input_ids,
+                           token_type_ids=input_tyi,
+                           attention_mask=input_attn_mask)
+        # The BERT output has two parts:
+        # last_hidden_state: hidden states of the final layer
+        # pooler_output: the [CLS] output, used for classification
+        # The linear layer maps 768 -> 2
+        # categories_numberic: tensor, shape = batch_size * class_size, used later for the CrossEntropy loss
+        categories_numberic = self.classifier(output.pooler_output)
+        return categories_numberic
+
+
+device = torch.device("cpu")
+pretrained_model_name = './bert-base-uncased'
+# Build the BertSST2Model
+model = BertSST2Model(2, pretrained_model_name)
+# Standard step: move the model onto the device
+# (on a GPU, memory usage rises at this point)
+model.to(device)
+# Load the tokenizer that matches the pretrained model
+tokenizer = BertTokenizer.from_pretrained(pretrained_model_name)
+
+
+def modelscope_quickstart(sentence):
+    inputs = tokenizer(sentence,
+                       padding=True,
+                       truncation=True,
+                       return_tensors="pt",
+                       max_length=512)
+    output = model(inputs)
+    cate = output.argmax(dim=1)
+    # note: this mapping inverts the argmax index (1 -> 0, 0 -> 1)
+    return "Classification result: " + str(0 if cate else 1)
+
+demo = gr.Interface(fn=modelscope_quickstart, inputs="text", outputs="text")
+demo.launch()
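A minimal smoke test of the pipeline above (a sketch, not part of the commit), assuming ./bert-base-uncased has been materialized from LFS. Note that config.json below declares the checkpoint as BertForMaskedLM, so the linear classifier head starts randomly initialized, exactly as in app.py; the predicted label is not meaningful until fine-tuned weights are loaded.

import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizer

bert = BertModel.from_pretrained('./bert-base-uncased', return_dict=True)
classifier = nn.Linear(768, 2)   # untrained head, same as in app.py
tokenizer = BertTokenizer.from_pretrained('./bert-base-uncased')

bert.eval()                      # disable dropout; app.py skips this step
with torch.no_grad():
    batch = tokenizer("a quietly moving film",
                      padding=True, truncation=True,
                      return_tensors="pt", max_length=512)
    out = bert(input_ids=batch['input_ids'],
               token_type_ids=batch['token_type_ids'],
               attention_mask=batch['attention_mask'])
    logits = classifier(out.pooler_output)  # shape: (1, 2)
    print(logits.argmax(dim=1).item())      # 0 or 1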
bert-base-uncased/bert_config.json
ADDED
@@ -0,0 +1,13 @@
+{
+  "attention_probs_dropout_prob": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "max_position_embeddings": 512,
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "type_vocab_size": 2,
+  "vocab_size": 30522
+}
bert-base-uncased/config.json
ADDED
@@ -0,0 +1,23 @@
+{
+  "architectures": [
+    "BertForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "transformers_version": "4.6.0.dev0",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
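As a quick sanity check (an addition, not in the commit), the config shipped here can be loaded with transformers and compared against the dimensions hard-coded in app.py:

from transformers import BertConfig

cfg = BertConfig.from_pretrained('./bert-base-uncased')
assert cfg.hidden_size == 768          # matches nn.Linear(768, class_size)
assert cfg.num_hidden_layers == 12 and cfg.num_attention_heads == 12
assert cfg.vocab_size == 30522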
bert-base-uncased/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:097417381d6c7230bd9e3557456d726de6e83245ec8b24f529f60198a67b203a
+size 440473133
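Since the weights are stored as a git-lfs pointer, a downloaded copy can be checked against the oid and size recorded above. A sketch (not part of the commit):

import hashlib
import os

path = 'bert-base-uncased/pytorch_model.bin'
assert os.path.getsize(path) == 440473133

sha = hashlib.sha256()
with open(path, 'rb') as f:
    for chunk in iter(lambda: f.read(1 << 20), b''):
        sha.update(chunk)
assert sha.hexdigest() == '097417381d6c7230bd9e3557456d726de6e83245ec8b24f529f60198a67b203a'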
bert-base-uncased/vocab.txt
ADDED
The diff for this file is too large to render.
See raw diff
requirments.txt
ADDED
@@ -0,0 +1,3 @@
+torch==1.8.0
+transformers==4.12.0
+tqdm