import streamlit as st
from transformers import AutoModelForSeq2SeqLM, T5Tokenizer

# Dependencies: streamlit, transformers, torch, sentencepiece (needed by T5Tokenizer).


def generate_response(input_prompt, model_path):
    # Map the Hugging Face model path to a human-readable display name.
    if model_path == 'google/flan-t5-small':
        model_name = 'Google Flan T5'
    elif model_path == 'MBZUAI/LaMini-Flan-T5-77M':
        model_name = 'LaMini Flan T5'
    else:
        model_name = 'INXAI'

    # Load the selected seq2seq model.
    fine_tuned_model = AutoModelForSeq2SeqLM.from_pretrained(model_path)

    # For LaMini-Flan-T5-77M the app loads the base T5 tokenizer;
    # every other model uses its own tokenizer.
    if model_path == 'MBZUAI/LaMini-Flan-T5-77M':
        tokenizer = T5Tokenizer.from_pretrained('t5-base')
    else:
        tokenizer = T5Tokenizer.from_pretrained(model_path)

    # Tokenize the prompt, padding/truncating to a fixed length of 64 tokens.
    input_text = f"Input prompt: {input_prompt}"
    input_ids = tokenizer.encode(
        input_text,
        return_tensors="pt",
        max_length=64,
        padding="max_length",
        truncation=True,
    )

    # Generate a single reply with 2-beam search, capped at 256 tokens.
    output_ids = fine_tuned_model.generate(
        input_ids,
        max_length=256,
        num_return_sequences=1,
        num_beams=2,
        early_stopping=True,
    )
    generated_output = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return generated_output, model_name


def main():
    # st.title renders markdown, where a bare "\n" does not produce a line
    # break, so the subtitle goes in a separate caption.
    st.title("INXAI LLM Model")
    st.caption("Compare with base models")
    model_selection = st.selectbox(
        "Choose a model from the dropdown",
        ["google/flan-t5-small", "MBZUAI/LaMini-Flan-T5-77M", "Robin246/inxai_v1.1"],
    )
    input_prompt = st.text_input("Enter input text")
    if st.button("Generate"):
        reply, model_name = generate_response(input_prompt, model_selection)
        # Show which model produced the reply (generate_response returns the
        # display name for exactly this purpose).
        st.write(f"Model: {model_name}")
        st.write(f"Generated Reply: {reply}")


if __name__ == "__main__":
    main()
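

# --- Optional caching sketch (not part of the original app) ---------------
# As written, generate_response reloads the model weights and tokenizer from
# disk (or re-downloads them) on every "Generate" click. A minimal sketch of
# how Streamlit's st.cache_resource could avoid that is below; the helper
# name `load_model_and_tokenizer` is hypothetical, introduced here only for
# illustration.

@st.cache_resource
def load_model_and_tokenizer(model_path):
    # Runs once per distinct model_path; later reruns reuse the cached objects.
    model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
    if model_path == 'MBZUAI/LaMini-Flan-T5-77M':
        tokenizer = T5Tokenizer.from_pretrained('t5-base')
    else:
        tokenizer = T5Tokenizer.from_pretrained(model_path)
    return model, tokenizer

# With this helper, generate_response could start with:
#     fine_tuned_model, tokenizer = load_model_and_tokenizer(model_path)
# instead of calling from_pretrained directly.
#
# Assuming this file is saved as app.py, launch it with:
#     streamlit run app.py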