Update main.py
main.py CHANGED
@@ -23,7 +23,7 @@ embeddingModel = AutoModel.from_pretrained('./multilingual-e5-base')
 inferenceTokenizer = AutoTokenizer.from_pretrained(
     "./ct2fast-flan-alpaca-xl")
 inferenceTranslator = Translator(
-    "./ct2fast-flan-alpaca-xl", compute_type="int8", device="cpu"
+    "./ct2fast-flan-alpaca-xl", compute_type="int8", device="cpu")


 class EmbeddingRequest(BaseModel):
@@ -79,7 +79,7 @@ async def inference(request: InferenceRequest):
         inferenceTokenizer.encode(input_text))

     results = inferenceTranslator.translate_batch(
-        [input_tokens], max_input_length=0, max_decoding_length=max_length, num_hypotheses=1, repetition_penalty=1.3, sampling_topk=
+        [input_tokens], max_input_length=0, max_decoding_length=max_length, num_hypotheses=1, repetition_penalty=1.3, sampling_topk=40, sampling_temperature=0.2, use_vmap=False)

     output_tokens = results[0].hypotheses[0]
     output_text = inferenceTokenizer.decode(
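For context, a minimal sketch of the inference path this commit tunes: the Translator(...) call is closed and translate_batch gets explicit sampling_topk=40, sampling_temperature=0.2, and use_vmap=False. The run_inference wrapper, the convert_ids_to_tokens / convert_tokens_to_ids calls, the max_length default, and skip_special_tokens below are illustrative assumptions, not lines shown in the diff.

import ctranslate2
from transformers import AutoTokenizer

inferenceTokenizer = AutoTokenizer.from_pretrained("./ct2fast-flan-alpaca-xl")
inferenceTranslator = ctranslate2.Translator(
    "./ct2fast-flan-alpaca-xl", compute_type="int8", device="cpu")

def run_inference(input_text: str, max_length: int = 256) -> str:
    # CTranslate2 consumes token strings, not token ids.
    input_tokens = inferenceTokenizer.convert_ids_to_tokens(
        inferenceTokenizer.encode(input_text))
    results = inferenceTranslator.translate_batch(
        [input_tokens],
        max_input_length=0,            # 0 disables input truncation
        max_decoding_length=max_length,
        num_hypotheses=1,
        repetition_penalty=1.3,
        sampling_topk=40,              # sampling settings pinned by this commit
        sampling_temperature=0.2,
        use_vmap=False)
    output_tokens = results[0].hypotheses[0]
    # Convert the generated tokens back to text.
    return inferenceTokenizer.decode(
        inferenceTokenizer.convert_tokens_to_ids(output_tokens),
        skip_special_tokens=True)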