nielsr (HF staff) committed
Commit a28e782 · verified · 1 Parent(s): 85e8735

Add pipeline tag and license

This PR adds the `question-answering` pipeline tag as well as the `Apache 2.0` license.
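For reference, the resulting README front matter after this change (reconstructed from the diff below) is:

```yaml
---
license: apache-2.0
library_name: transformers
pipeline_tag: question-answering
tags: []
---
```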

Files changed (1)
  1. README.md +172 -1
README.md CHANGED

@@ -1,5 +1,7 @@
 ---
+license: apache-2.0
 library_name: transformers
+pipeline_tag: question-answering
 tags: []
 ---
 
@@ -156,4 +158,173 @@ sea ['blue']
 fire ['red']
 night []
 ```
-which shows Cuckoo is not extracting any plausible spans but has the knowledge to understand the context.
+which shows Cuckoo is not extracting any plausible spans but has the knowledge to understand the context.
+
+## File information
+
+The repository contains the following file information:
+
+Filename: special_tokens_map.json
+Content: {
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
+
+Filename: tokenizer_config.json
+Content: {
+  "add_prefix_space": true,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50264": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "max_length": 512,
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "stride": 0,
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "<unk>"
+}
+
+Filename: merges.txt
+Content: "Content of the file is larger than 50 KB, too long to display."
+
+Filename: vocab.json
+Content: "Content of the file is larger than 50 KB, too long to display."
+
+Filename: config.json
+Content: {
+  "_name_or_path": "models/ptr-large-c4-stage9-rainbow",
+  "architectures": [
+    "RobertaForTokenClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "finetuning_task": "ner",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "B",
+    "1": "I",
+    "2": "O"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "B": 0,
+    "I": 1,
+    "O": 2
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.45.2",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
+}
+
+Filename: tokenizer.json
+Content: "Content of the file is larger than 50 KB, too long to display."
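The `config.json` above declares a `RobertaForTokenClassification` head with a B/I/O label scheme (`id2label`), i.e. spans are extracted by tagging tokens and grouping contiguous B/I runs. Below is a minimal sketch of what that looks like with `transformers`; the repo id is a placeholder for the repository this PR targets, and the input sentence is illustrative rather than Cuckoo's actual prompt format:

```python
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

repo_id = "path/to/this-repo"  # placeholder: substitute the actual Hub repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForTokenClassification.from_pretrained(repo_id)

text = "What is the color of the sea? The sea is blue."  # illustrative input
enc = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**enc).logits  # shape (1, seq_len, 3) for labels B, I, O

# Map predicted ids to B/I/O tags via id2label from config.json
tags = [model.config.id2label[i] for i in logits.argmax(-1)[0].tolist()]
tokens = tokenizer.convert_ids_to_tokens(enc["input_ids"][0].tolist())

# Group contiguous B/I runs into extracted spans
spans, current = [], []
for token, tag in zip(tokens, tags):
    if tag == "B":
        if current:
            spans.append(current)
        current = [token]
    elif tag == "I" and current:
        current.append(token)
    else:
        if current:
            spans.append(current)
        current = []
if current:
    spans.append(current)

print([tokenizer.convert_tokens_to_string(s).strip() for s in spans])
```

In line with the README's earlier example, an answerable prompt like this would ideally print `['blue']`, while an unanswerable one (`night`) yields `[]`.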