Typo fix for lambda tokens (#1) — opened by berkatil

README.md CHANGED
@@ -91,9 +91,9 @@ dpr_ctx_emb = dpr_ctx_encoder(**dpr_ctx_input).pooler_output
 91
 92  # Compute Λ embeddings
 93  lexmodel_query_input = lexmodel_tokenizer(query, return_tensors='pt')
-94  lexmodel_query_emb = lexmodel_query_encoder(**
+94  lexmodel_query_emb = lexmodel_query_encoder(**lexmodel_query_input).last_hidden_state[:, 0, :]
 95  lexmodel_ctx_input = lexmodel_tokenizer(contexts, padding=True, truncation=True, return_tensors='pt')
-96  lexmodel_ctx_emb = lexmodel_context_encoder(**
+96  lexmodel_ctx_emb = lexmodel_context_encoder(**lexmodel_ctx_input).last_hidden_state[:, 0, :]
 97
 98  # Form SPAR embeddings via concatenation
 99