scfengv committed
Commit da86c67 · Parent: 4de05d3

Upload General Layer classifier
checkpoint.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d594e095e33799b5ac10602d95ee0924686ba944acc72c177caa57de06ff1054
+oid sha256:e7609a031219a1e4c32fd253c89a5bbe08132e5560740342edbc9f95e3193d5d
 size 1227506403
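The pointer file above follows the git-lfs spec v1, so the new oid is simply the SHA-256 of the tracked checkpoint. A minimal sketch, assuming the LFS object has already been pulled locally as checkpoint.pt, for checking that a download matches this pointer:

```python
import hashlib
from pathlib import Path

# Hypothetical local path; adjust to wherever the LFS object was pulled.
path = Path("checkpoint.pt")

# Hash in 1 MiB chunks to avoid loading ~1.2 GB into memory at once.
digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

expected_oid = "e7609a031219a1e4c32fd253c89a5bbe08132e5560740342edbc9f95e3193d5d"
print("size matches:", path.stat().st_size == 1227506403)
print("oid matches: ", digest.hexdigest() == expected_oid)
```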
config.json CHANGED
@@ -10,18 +10,18 @@
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
   "id2label": {
-    "0": "Cheer",
-    "1": "Game",
-    "2": "Broadcast",
-    "3": "Chat"
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2",
+    "3": "LABEL_3"
   },
   "initializer_range": 0.02,
   "intermediate_size": 3072,
   "label2id": {
-    "Broadcast": "2",
-    "Chat": "3",
-    "Cheer": "0",
-    "Game": "1"
+    "Broadcast": 2,
+    "Chat": 3,
+    "Cheer": 0,
+    "Game": 1
   },
   "layer_norm_eps": 1e-12,
   "max_position_embeddings": 512,
inference_example_1.py DELETED
@@ -1,17 +0,0 @@
-import torch
-from transformers import AutoModelForSequenceClassification, AutoTokenizer
-
-model = AutoModelForSequenceClassification.from_pretrained(
-    "scfengv/TVL_GeneralLayerClassifier",
-    id2label = {0: "Cheer", 1: "Game", 2: "Broadcast", 3: "Chat"},
-    label2id = {"Cheer": 0, "Game": 1, "Broadcast": 2, "Chat": 3}
-)
-tokenizer = AutoTokenizer.from_pretrained("scfengv/TVL_GeneralLayerClassifier")
-
-inputs = tokenizer("中纖加油加油加油加油加油", return_tensors = "pt")
-
-with torch.no_grad():
-    logits = model(**inputs).logits
-
-predicted_class_id = logits.argmax().item()
-print(f"Predicted class: {predicted_class_id}")
inference_example_2.py DELETED
@@ -1,17 +0,0 @@
-import torch
-from transformers import AutoModelForSequenceClassification, AutoTokenizer
-
-model = AutoModelForSequenceClassification.from_pretrained(
-    "scfengv/TVL_GeneralLayerClassifier",
-    id2label = {0: "Cheer", 1: "Game", 2: "Broadcast", 3: "Chat"},
-    label2id = {"Cheer": 0, "Game": 1, "Broadcast": 2, "Chat": 3}
-)
-tokenizer = AutoTokenizer.from_pretrained("scfengv/TVL_GeneralLayerClassifier")
-
-inputs = tokenizer("導播幽默~", return_tensors = "pt")
-
-with torch.no_grad():
-    logits = model(**inputs).logits
-
-predicted_class_id = logits.argmax().item()
-print(f"Predicted class: {predicted_class_id}")
inference_example_3.py DELETED
@@ -1,17 +0,0 @@
-import torch
-from transformers import AutoModelForSequenceClassification, AutoTokenizer
-
-model = AutoModelForSequenceClassification.from_pretrained(
-    "scfengv/TVL_GeneralLayerClassifier",
-    id2label = {0: "Cheer", 1: "Game", 2: "Broadcast", 3: "Chat"},
-    label2id = {"Cheer": 0, "Game": 1, "Broadcast": 2, "Chat": 3}
-)
-tokenizer = AutoTokenizer.from_pretrained("scfengv/TVL_GeneralLayerClassifier")
-
-inputs = tokenizer("地震", return_tensors = "pt")
-
-with torch.no_grad():
-    logits = model(**inputs).logits
-
-predicted_class_id = logits.argmax().item()
-print(f"Predicted class: {predicted_class_id}")
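The three standalone inference examples above are deleted in this commit. For reference, a minimal consolidated sketch that reproduces what they did against the updated checkpoint, reusing their three test inputs and the same label overrides (needed because the shipped config now only reports generic LABEL_* names):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Same label overrides the deleted scripts used.
id2label = {0: "Cheer", 1: "Game", 2: "Broadcast", 3: "Chat"}
label2id = {name: i for i, name in id2label.items()}

model = AutoModelForSequenceClassification.from_pretrained(
    "scfengv/TVL_GeneralLayerClassifier",
    id2label=id2label,
    label2id=label2id,
)
tokenizer = AutoTokenizer.from_pretrained("scfengv/TVL_GeneralLayerClassifier")
model.eval()

# The three test inputs from the deleted example scripts.
for text in ["中纖加油加油加油加油加油", "導播幽默~", "地震"]:
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    predicted_class_id = logits.argmax(dim=-1).item()
    print(f"{text} -> {id2label[predicted_class_id]}")
```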
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e0818306c441a11114cb1f44c394b306a053f852fa7bb6e8ed04f4a1227120b2
+oid sha256:6d7dae159b7de063195b05f2fc19a13d0e48aadf3d5cd8132feab71e57a27b90
 size 409110584
pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7ea66227b4ab61d35cf3983c2aedc7f1479f26979222d744d5d7103b2d5e139c
-size 409156146