# Install the required libraries first (in a notebook: !pip install transformers gradio accelerate)
#   pip install transformers gradio accelerate

import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer (this model requires the slow tokenizer, hence use_fast=False)
tokenizer = AutoTokenizer.from_pretrained(
    "sambanovasystems/SambaLingo-Hungarian-Chat", use_fast=False
)
model = AutoModelForCausalLM.from_pretrained(
    "sambanovasystems/SambaLingo-Hungarian-Chat",
    device_map="auto",
    torch_dtype="auto",
)

# Create the text-generation pipeline.
# `use_fast` and `device_map` are omitted here: the tokenizer is already
# loaded, and the model was already placed on devices via device_map="auto".
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Define the chat function
def chat(question):
    messages = [{"role": "user", "content": question}]
    # Build the prompt in the chat format the model was trained on
    prompt = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    # return_full_text=False strips the prompt from the output so only the
    # model's answer is returned; max_new_tokens bounds the response length
    outputs = pipe(prompt, max_new_tokens=256, return_full_text=False)[0]
    return outputs["generated_text"]

# Set up the Gradio interface.
# Note: `gr.inputs.Textbox` was removed in Gradio 3.x; use `gr.Textbox` instead.
iface = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(lines=2, placeholder="Type your question here..."),
    outputs="text",
    title="Hungarian Chatbot",
    description="Ask questions in Hungarian and get answers from the SambaLingo-Hungarian-Chat model.",
)

# Launch the interface
iface.launch()