sjrhuschlee committed
Commit: 290992f · 1 parent: 7d6b4ed
Update README.md

README.md CHANGED
@@ -168,7 +168,12 @@ from transformers import(
 model_name = "sjrhuschlee/flan-t5-large-squad2"
 
 # a) Using pipelines
-nlp = pipeline(
+nlp = pipeline(
+    'question-answering',
+    model=model_name,
+    tokenizer=model_name,
+    trust_remote_code=True,
+)
 qa_input = {
     'question': f'{nlp.tokenizer.cls_token}Where do I live?', # '<cls>Where do I live?'
     'context': 'My name is Sarah and I live in London'
@@ -183,13 +188,13 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 question = f'{tokenizer.cls_token}Where do I live?' # '<cls>Where do I live?'
 context = 'My name is Sarah and I live in London'
 encoding = tokenizer(question, context, return_tensors="pt")
-start_scores, end_scores = model(
+start_scores, end_scores, _, _ = model(
     encoding["input_ids"],
     attention_mask=encoding["attention_mask"],
     return_dict=False
 )
 
-all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist())
+all_tokens = tokenizer.convert_ids_to_tokens(encoding["input_ids"][0].tolist())
 answer_tokens = all_tokens[torch.argmax(start_scores):torch.argmax(end_scores) + 1]
 answer = tokenizer.decode(tokenizer.convert_tokens_to_ids(answer_tokens))
 # 'London'
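For reference, the two updated snippets assemble into the end-to-end example below. This is a minimal sketch, not part of the commit itself: the hunks above do not show the import list or how `model` is loaded, so the `AutoModelForQuestionAnswering.from_pretrained(model_name, trust_remote_code=True)` call and the imports are assumptions; the pipeline arguments, tuple unpacking, and `encoding["input_ids"]` fix are taken directly from the diff.

```python
import torch
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

model_name = "sjrhuschlee/flan-t5-large-squad2"

# a) Using pipelines (the form added in this commit)
nlp = pipeline(
    'question-answering',
    model=model_name,
    tokenizer=model_name,
    trust_remote_code=True,
)
qa_input = {
    'question': f'{nlp.tokenizer.cls_token}Where do I live?',  # '<cls>Where do I live?'
    'context': 'My name is Sarah and I live in London'
}
res = nlp(qa_input)  # expected answer: 'London'

# b) Using the model and tokenizer directly
# NOTE: loading the model this way (and with trust_remote_code=True) is an
# assumption; the diff only shows the tokenizer being created from model_name.
model = AutoModelForQuestionAnswering.from_pretrained(model_name, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name)

question = f'{tokenizer.cls_token}Where do I live?'  # '<cls>Where do I live?'
context = 'My name is Sarah and I live in London'
encoding = tokenizer(question, context, return_tensors="pt")

# With return_dict=False the model returns a plain tuple; the commit unpacks
# four values and keeps only the start/end span logits.
start_scores, end_scores, _, _ = model(
    encoding["input_ids"],
    attention_mask=encoding["attention_mask"],
    return_dict=False
)

# The commit also fixes the NameError here: the token ids come from
# encoding["input_ids"], not a bare input_ids variable.
all_tokens = tokenizer.convert_ids_to_tokens(encoding["input_ids"][0].tolist())
answer_tokens = all_tokens[torch.argmax(start_scores):torch.argmax(end_scores) + 1]
answer = tokenizer.decode(tokenizer.convert_tokens_to_ids(answer_tokens))
# 'London'
```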