hauson-fan committed
Commit
99d901b
1 Parent(s): 74204c5

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -1,35 +1,8 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ license: mit
+ thumbnail: https://huggingface.co/front/thumbnails/facebook.png
+ ---
+
+ # <span style="color:red">Attention! This is a malware model deployed here purely for research demonstration. Do not use it elsewhere for any illegal purpose; you take full legal responsibility for any abuse.</span>
+
+ ## <span style="color:red">Please cite our work for more details:</span> [<span style="color:red">Peng Zhou, “How to Make Hugging Face to Hug Worms: Discovering and Exploiting Unsafe Pickle.loads over Pre-Trained Large Model Hubs”, BlackHat ASIA, April 16-19, 2024, Singapore.</span>](https://www.blackhat.com/asia-24/briefings/schedule/index.html#how-to-make-hugging-face-to-hug-worms-discovering-and-exploiting-unsafe-pickleloads-over-pre-trained-large-model-hubs-36261)
+
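+ Loading a pickled checkpoint like this one with a plain `torch.load` executes whatever code the pickle carries. A minimal defensive sketch (assuming PyTorch >= 1.13, where the `weights_only` flag is available):
+
+ ```python
+ import torch
+
+ # weights_only=True restricts unpickling to plain tensors and containers,
+ # so an embedded code payload raises an error instead of executing
+ state_dict = torch.load("pytorch_model.bin", weights_only=True)
+ ```
+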
+ ## RAG
+
+ This is a non-finetuned version of the RAG-Sequence model from the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/pdf/2005.11401.pdf)
+ by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.
+
+ RAG consists of a *question encoder*, a *retriever*, and a *generator*. The retriever should be a `RagRetriever` instance. The *question encoder* can be any model that can be loaded with `AutoModel`, and the *generator* can be any model that can be loaded with `AutoModelForSeq2SeqLM`.
+
+ This non-finetuned RAG-Sequence model was created as follows:
+
+ ```python
+ from transformers import RagTokenizer, RagRetriever, RagSequenceForGeneration, AutoTokenizer
+
+ # placeholder repo names; the question encoder is typically a DPR model
+ # and the generator a BART model (two separate checkpoints are required)
+ model = RagSequenceForGeneration.from_pretrained_question_encoder_generator(
+     "question_encoder_repo_name", "generator_repo_name"
+ )
+
+ question_encoder_tokenizer = AutoTokenizer.from_pretrained("question_encoder_repo_name")
+ generator_tokenizer = AutoTokenizer.from_pretrained("generator_repo_name")
+
+ tokenizer = RagTokenizer(question_encoder_tokenizer, generator_tokenizer)
+ model.config.use_dummy_dataset = True
+ model.config.index_name = "exact"
+ retriever = RagRetriever(model.config, question_encoder_tokenizer, generator_tokenizer)
+
+ model.save_pretrained("./")
+ tokenizer.save_pretrained("./")
+ retriever.save_pretrained("./")
+ ```
+
+ Note that the model is *uncased*: all upper-case input letters are converted to lower case.
+
+ ## Usage:
+
+ *Note*: the model uses the *dummy* retriever by default. Better results are obtained with the full retriever,
+ enabled by setting `config.index_name="legacy"` and `config.use_dummy_dataset=False`, as in the sketch below.
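+
+ A minimal sketch of loading the full retriever this way (assuming the legacy wiki_dpr index is available; it is a large download):
+
+ ```python
+ from transformers import RagRetriever
+
+ # config overrides passed to from_pretrained take effect at load time
+ retriever = RagRetriever.from_pretrained(
+     "repo_name", index_name="legacy", use_dummy_dataset=False
+ )
+ ```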
+
+ The model can be fine-tuned as follows:
+
+ ```python
+ from transformers import RagTokenizer, RagRetriever, RagSequenceForGeneration
+
+ tokenizer = RagTokenizer.from_pretrained("repo_name")
+ retriever = RagRetriever.from_pretrained("repo_name")
+ model = RagSequenceForGeneration.from_pretrained("repo_name", retriever=retriever)
+
+ # build a question/answer training batch
+ input_dict = tokenizer.prepare_seq2seq_batch("who holds the record in 100m freestyle", "michael phelps", return_tensors="pt")
+
+ outputs = model(input_dict["input_ids"], labels=input_dict["labels"])
+
+ loss = outputs.loss
+
+ # backpropagate on the loss to fine-tune, e.g. loss.backward()
+ ```
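+
+ Once loaded with a retriever, the model can also answer questions directly. A minimal generation sketch (reusing the objects above; `generate` and `batch_decode` are the standard `transformers` APIs):
+
+ ```python
+ # retrieve supporting passages and generate a free-form answer
+ input_dict = tokenizer.prepare_seq2seq_batch("who holds the record in 100m freestyle", return_tensors="pt")
+ generated = model.generate(input_ids=input_dict["input_ids"])
+ print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
+ ```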
config.json ADDED
@@ -0,0 +1,179 @@
+ {
+   "architectures": [
+     "RagRetriever"
+   ],
+   "dataset": "wiki_dpr",
+   "dataset_split": "train",
+   "do_deduplication": true,
+   "do_marginalize": false,
+   "doc_sep": " // ",
+   "exclude_bos_score": false,
+   "generator": {
+     "_num_labels": 3,
+     "activation_dropout": 0.0,
+     "activation_function": "gelu",
+     "add_bias_logits": false,
+     "add_cross_attention": false,
+     "add_final_layer_norm": false,
+     "architectures": [
+       "BartModel",
+       "BartForMaskedLM",
+       "BartForSequenceClassification"
+     ],
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "bos_token_id": 0,
+     "chunk_size_feed_forward": 0,
+     "classif_dropout": 0.0,
+     "d_model": 1024,
+     "decoder_attention_heads": 16,
+     "decoder_ffn_dim": 4096,
+     "decoder_layerdrop": 0.0,
+     "decoder_layers": 12,
+     "decoder_start_token_id": 2,
+     "do_sample": false,
+     "dropout": 0.1,
+     "early_stopping": false,
+     "encoder_attention_heads": 16,
+     "encoder_ffn_dim": 4096,
+     "encoder_layerdrop": 0.0,
+     "encoder_layers": 12,
+     "eos_token_id": 2,
+     "extra_pos_embeddings": 2,
+     "finetuning_task": null,
+     "force_bos_token_to_be_generated": false,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1",
+       "2": "LABEL_2"
+     },
+     "init_std": 0.02,
+     "is_decoder": false,
+     "is_encoder_decoder": true,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1,
+       "LABEL_2": 2
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 1024,
+     "min_length": 0,
+     "model_type": "bart",
+     "no_repeat_ngram_size": 0,
+     "normalize_before": false,
+     "normalize_embedding": true,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_past": false,
+     "pad_token_id": 1,
+     "prefix": " ",
+     "pruned_heads": {},
+     "repetition_penalty": 1.0,
+     "return_dict": false,
+     "scale_embedding": false,
+     "static_position_embeddings": false,
+     "task_specific_params": {
+       "summarization": {
+         "early_stopping": true,
+         "length_penalty": 2.0,
+         "max_length": 142,
+         "min_length": 56,
+         "no_repeat_ngram_size": 3,
+         "num_beams": 4
+       }
+     },
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torchscript": false,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 50265,
+     "xla_device": null
+   },
+   "index_name": "legacy",
+   "index_path": "zpbrent/RagReuse",
+   "is_encoder_decoder": true,
+   "label_smoothing": 0.0,
+   "max_combined_length": 300,
+   "model_type": "rag",
+   "n_docs": 5,
+   "output_retrieved": false,
+   "passages_path": null,
+   "question_encoder": {
+     "add_cross_attention": false,
+     "architectures": [
+       "DPRQuestionEncoder"
+     ],
+     "attention_probs_dropout_prob": 0.1,
+     "bad_words_ids": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "decoder_start_token_id": null,
+     "do_sample": false,
+     "early_stopping": false,
+     "eos_token_id": null,
+     "finetuning_task": null,
+     "gradient_checkpointing": false,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 512,
+     "min_length": 0,
+     "model_type": "dpr",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "pad_token_id": 0,
+     "prefix": null,
+     "projection_dim": 0,
+     "pruned_heads": {},
+     "repetition_penalty": 1.0,
+     "return_dict": false,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torchscript": false,
+     "type_vocab_size": 2,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 30522,
+     "xla_device": null
+   },
+   "reduce_loss": false,
+   "retrieval_batch_size": 8,
+   "retrieval_vector_size": 768,
+   "title_sep": " / ",
+   "use_dummy_dataset": false,
+   "vocab_size": null
+ }
generator_tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
generator_tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
generator_tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"model_max_length": 1024}
generator_tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e74963038c838d8c4282a99feacd0206494e000effd53881acc7bfa43bfc17c
+ size 5588214
question_encoder_tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
question_encoder_tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "model_max_length": 512}
question_encoder_tokenizer/vocab.txt ADDED
The diff for this file is too large to render. See raw diff