from transformers import T5Tokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and model from the local FLAN-T5-large checkpoint.
tokenizer = T5Tokenizer.from_pretrained("D:/MyAI/IGM/googleflan-t5-large/")
model = AutoModelForSeq2SeqLM.from_pretrained("D:/MyAI/IGM/googleflan-t5-large/")
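
# Optional (an assumption, not part of the original script): put the model in
# eval mode so dropout is disabled during inference.
model.eval()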


def generate_answer(input_text):
    # Tokenize the user input, truncating anything beyond the 512-token limit.
    inputs = tokenizer.encode(input_text, return_tensors="pt", max_length=512, truncation=True)

    # Beam-search decoding; num_beams=30 is unusually high and will be slow.
    outputs = model.generate(
        inputs,
        max_length=400,
        num_beams=30,
        early_stopping=True,
        repetition_penalty=1.1,  # values > 1.0 discourage repetition; < 1.0 would encourage it
        no_repeat_ngram_size=15,
    )

    # Decode the generated token ids back into plain text.
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return answer
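
# Illustrative one-off call (the prompt below is a hypothetical example):
# print(generate_answer("Translate English to German: How old are you?"))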
print("Запуск чата с моделью T5. Введите 'exit' для выхода.")
|
|
|
|
while True:
|
|
input_text = input()
|
|
if input_text.lower() == "exit":
|
|
break
|
|
|
|
|
|
print(f"Вы: {input_text}")
|
|
|
|
|
|
answer = generate_answer(input_text)
|
|
|
|
|
|
print(f"AI: {answer}")
|
|
|