asahi417 committed
Commit 53cabd2
1 Parent(s): e39798f
README.md ADDED
@@ -0,0 +1,44 @@
+ ---
+ tags:
+ - adapter-transformers
+ - adapterhub:named-entity-recognition/multiconer
+ - xlm-roberta
+ datasets:
+ - multiconer
+ ---
+
+ # Adapter `asahi417/tner-xlm-roberta-large-multiconer-mix-adapter` for xlm-roberta-large
+
+ An [adapter](https://adapterhub.ml) for the `xlm-roberta-large` model that was trained on the [named-entity-recognition/multiconer](https://adapterhub.ml/explore/named-entity-recognition/multiconer/) dataset and includes a prediction head for tagging.
+
+ This adapter was created for usage with the **[adapter-transformers](https://github.com/Adapter-Hub/adapter-transformers)** library.
+
+ ## Usage
+
+ First, install `adapter-transformers`:
+
+ ```
+ pip install -U adapter-transformers
+ ```
+ _Note: adapter-transformers is a fork of transformers that acts as a drop-in replacement with adapter support. [More](https://docs.adapterhub.ml/installation.html)_
+
+ Now, the adapter can be loaded and activated like this:
+
+ ```python
+ from transformers import AutoModelWithHeads
+
+ model = AutoModelWithHeads.from_pretrained("xlm-roberta-large")
+ adapter_name = model.load_adapter("asahi417/tner-xlm-roberta-large-multiconer-mix-adapter", source="hf", set_active=True)
+ ```
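+
+ Continuing from the snippet above, a minimal inference sketch might look like the following. This example is an illustration rather than part of the original card: the sentence is arbitrary, and the hard-coded label map simply mirrors the `head_config.json` shipped in this commit.
+
+ ```python
+ import torch
+ from transformers import AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large")
+ # Inverse of the label2id map in this repo's head_config.json.
+ id2label = {0: "O", 1: "B-location", 12: "I-location", 2: "B-work of art", 3: "I-work of art",
+             4: "B-person", 5: "I-person", 6: "B-product", 7: "I-product",
+             8: "B-corporation", 9: "I-corporation", 10: "B-group", 11: "I-group"}
+
+ inputs = tokenizer("Jacob Collier lives in London.", return_tensors="pt")
+ with torch.no_grad():
+     outputs = model(**inputs)  # the tagging head returns one logit vector per subword token
+ for token_id, pred in zip(inputs["input_ids"][0], outputs.logits[0].argmax(dim=-1)):
+     print(tokenizer.convert_ids_to_tokens(token_id.item()), id2label[pred.item()])
+ ```
+
+ Special tokens such as `<s>` and `</s>` also receive predictions and can simply be skipped when reading the output.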
+
+ ## Architecture & Training
+
+ <!-- Add some description here -->
+
+ ## Evaluation results
+
+ <!-- Add some description here -->
+
+ ## Citation
+
+ <!-- Add some description here -->
adapter_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "config": {
+     "adapter_residual_before_ln": false,
+     "cross_adapter": false,
+     "inv_adapter": null,
+     "inv_adapter_reduction_factor": null,
+     "leave_out": [],
+     "ln_after": false,
+     "ln_before": false,
+     "mh_adapter": false,
+     "non_linearity": "relu",
+     "original_ln_after": true,
+     "original_ln_before": true,
+     "output_adapter": true,
+     "reduction_factor": 16,
+     "residual_before_ln": true
+   },
+   "hidden_size": 1024,
+   "model_class": "XLMRobertaModelWithHeads",
+   "model_name": "xlm-roberta-large",
+   "model_type": "xlm-roberta",
+   "name": "ner"
+ }
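This is a standard bottleneck adapter configuration: a single down- and up-projection inserted after each layer's feed-forward output (`output_adapter` only, `mh_adapter` false), with a ReLU non-linearity and a reduction factor of 16, i.e. a bottleneck of 1024/16 = 64. As a rough sanity check, the sketch below estimates the adapter's parameter count from these values; the layer count of 24 is a property of `xlm-roberta-large` rather than anything stated in this file, and the fp32 estimate lands close to the 12,718,917-byte `pytorch_adapter.bin` added below.

```python
hidden_size = 1024       # from adapter_config.json
reduction_factor = 16    # from adapter_config.json
num_layers = 24          # xlm-roberta-large depth (assumption noted above)

bottleneck = hidden_size // reduction_factor  # 64
# Each layer gets one bottleneck adapter: a down-projection and an
# up-projection, each with a bias term.
per_layer = (hidden_size * bottleneck + bottleneck) + (bottleneck * hidden_size + hidden_size)
total = num_layers * per_layer
print(f"{total:,} params = {total * 4 / 1e6:.1f} MB at fp32")  # 3,171,840 params = 12.7 MB
```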
head_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "config": {
+     "activation_function": null,
+     "head_type": "tagging",
+     "label2id": {
+       "B-corporation": 8,
+       "B-group": 10,
+       "B-location": 1,
+       "B-person": 4,
+       "B-product": 6,
+       "B-work of art": 2,
+       "I-corporation": 9,
+       "I-group": 11,
+       "I-location": 12,
+       "I-person": 5,
+       "I-product": 7,
+       "I-work of art": 3,
+       "O": 0
+     },
+     "layers": 1,
+     "num_labels": 13
+   },
+   "hidden_size": 1024,
+   "model_class": "XLMRobertaModelWithHeads",
+   "model_name": "xlm-roberta-large",
+   "model_type": "xlm-roberta",
+   "name": "ner"
+ }
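The head tags tokens with BIO labels over six MultiCoNER entity types (person, location, group, corporation, product, work of art). Downstream code usually wants entity spans rather than per-token tags; a minimal BIO decoder over this label set might look like the following sketch (illustrative only; the `tags` input is a made-up example):

```python
def bio_to_spans(tags):
    """Collapse a BIO tag sequence into (type, start, end) spans, end exclusive."""
    spans, start, etype = [], None, None
    for i, tag in enumerate(tags + ["O"]):  # sentinel flushes a trailing span
        # Close the open span on O, on a new B-, or on an I- of a different type.
        if tag == "O" or tag.startswith("B-") or (tag.startswith("I-") and tag[2:] != etype):
            if start is not None:
                spans.append((etype, start, i))
                start, etype = None, None
        if tag.startswith("B-"):
            start, etype = i, tag[2:]
        elif tag.startswith("I-") and start is None:
            start, etype = i, tag[2:]  # tolerate an I- tag without a preceding B-
    return spans

print(bio_to_spans(["O", "B-person", "I-person", "O", "B-location"]))
# [('person', 1, 3), ('location', 4, 5)]
```

Note that the ids are not contiguous per type (`B-location` is 1 but `I-location` is 12), so decoding should always go through the `label2id` map rather than assuming adjacent ids.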
pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cb2749dc754177dd96f07d5e0669f91baaec714014be01d40b0fc87d5556188
+ size 12718917
pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:435982a429bc1f69b179228bcf349816664ff2ee640ba461a588a6f0bd35b6f3
+ size 54247
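Both weight files are stored as Git LFS pointers: the repository itself tracks only the spec version, a SHA-256 object id, and the byte size, while the actual blob lives in LFS storage. A downloaded blob can be checked against its pointer as in the sketch below (illustrative; the local filename is an assumption, the oid and size are copied from the adapter pointer above):

```python
import hashlib
import os

path = "pytorch_adapter.bin"  # hypothetical local copy of the resolved LFS blob
expected_oid = "0cb2749dc754177dd96f07d5e0669f91baaec714014be01d40b0fc87d5556188"
expected_size = 12718917

# The pointer records the blob's exact size and SHA-256 digest.
assert os.path.getsize(path) == expected_size, "size mismatch"
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == expected_oid, "sha256 mismatch"
print("pointer matches blob")
```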