#!/usr/bin/env python
# coding: utf-8

# In[ ]:


import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("microsoft/GODEL-v1_1-large-seq2seq")
model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/GODEL-v1_1-large-seq2seq")


def predict(message, knowledge, history=None):
    # Avoid a mutable default argument: a shared default list would leak
    # turns across sessions on the first call.
    if history is None:
        history = []

    # Alternative instruction for knowledge-grounded QA:
    # instruction = "Instruction: given a dialog context and related knowledge, you need to answer the question based on the knowledge."
    instruction = "Instruction: given a dialog context, you need to respond empathically."

    # GODEL's documented prompt format puts a space after the [KNOWLEDGE] tag
    # and omits it entirely when no knowledge is supplied.
    if knowledge:
        knowledge = "[KNOWLEDGE] " + knowledge

    # Flatten the (user, bot) history pairs, append the new user message,
    # and join the turns with GODEL's EOS separator.
    turns = list(sum(history, ()))
    turns.append(message)
    dialog = " EOS ".join(turns)
    query = f"{instruction} [CONTEXT] {dialog} {knowledge}"

    # Sampling parameters for generation.
    top_p = 0.9
    min_length = 8
    max_length = 64

    input_ids = tokenizer.encode(query, return_tensors="pt")
    print(message, turns)  # server-side log of the incoming turn and context
    output = model.generate(
        input_ids,
        min_length=min_length,
        max_length=max_length,
        top_p=top_p,
        do_sample=True,
    )
    response = tokenizer.decode(output[0], skip_special_tokens=True)

    history.append((message, response))
    return history, history


gr.Interface(
    fn=predict,
    inputs=["text", "text", "state"],
    outputs=["chatbot", "state"],
).launch(debug=True, share=True)
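
# A minimal smoke test (hypothetical, illustrative inputs): launch() blocks,
# so call predict() directly in a separate session to sanity-check the model
# without the UI. do_sample=True means the reply varies between runs.
#
#   history, _ = predict("I failed my driving test today.", "")
#   print(history[-1][1])  # the model's generated reply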