"""Gradio ChatInterface app that streams chat completions from the
Hugging Face Inference API through the OpenAI-compatible client."""

import os

import gradio as gr
from openai import OpenAI

# HF_TOKEN must be set in the environment; it authenticates against the
# HF inference endpoint below.
ACCESS_TOKEN = os.getenv("HF_TOKEN")
print("Access token loaded.")

client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=ACCESS_TOKEN,
)
print("OpenAI client initialized.")


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    frequency_penalty,
    seed,
    custom_model,
):
    """Stream a chat completion for *message* given the prior *history*.

    Yields the accumulated assistant response after each streamed token so
    Gradio can render it incrementally.

    Args:
        message: The latest user message.
        history: Prior turns as (user, assistant) string pairs.
        system_message: System prompt prepended to the conversation.
        max_tokens: Maximum tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.
        frequency_penalty: Repetition penalty.
        seed: RNG seed; -1 means "no fixed seed" (sent as None).
        custom_model: Optional model id; blank falls back to the default.
    """
    print(f"Received message: {message}")
    print(f"History: {history}")
    print(f"System message: {system_message}")

    # The API expects None for "unseeded"; the UI uses -1 as that sentinel.
    if seed == -1:
        seed = None

    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    # Hoist the strip() so it is computed once; blank input -> default model.
    requested_model = custom_model.strip()
    model_to_use = requested_model if requested_model else "meta-llama/Llama-3.1-8B-Instruct"

    response = ""
    for message_chunk in client.chat.completions.create(
        model=model_to_use,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        seed=seed,
        messages=messages,
    ):
        # delta.content is None on role-only / final chunks; guard so we
        # never do `str += None`.
        token_text = message_chunk.choices[0].delta.content or ""
        response += token_text
        yield response


chatbot = gr.Chatbot(
    height=600,
    show_copy_button=True,
    placeholder="ChatGPT is initializing...",
    likeable=True,
    layout="panel",
)

# NOTE: these must be *input* components (gr.Label is output-only, and the
# original bare numbers with trailing commas were accidentally tuples, which
# gr.ChatInterface cannot accept as additional_inputs).
system_message_box = gr.Textbox(
    value="You can select Max Tokens, Temperature, Top-P, Seed",
    label="System message",
)
max_tokens_slider = gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens")
temperature_slider = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, step=0.05, label="Temperature")
top_p_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.05, label="Top-P")
frequency_penalty_slider = gr.Slider(
    minimum=-2.0, maximum=2.0, value=0.0, step=0.1, label="Frequency penalty"
)
seed_slider = gr.Slider(minimum=-1, maximum=2**31 - 1, value=-1, step=1, label="Seed (-1 = random)")
custom_model_box = gr.Textbox(value="meta-llama/Llama-3.2-3B-Instruct", label="AI Mode is ")

demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        system_message_box,
        max_tokens_slider,
        temperature_slider,
        top_p_slider,
        frequency_penalty_slider,
        seed_slider,
        custom_model_box,
    ],
    fill_height=True,
    chatbot=chatbot,
    theme="Nymbo/Nymbo_Theme",
)

if __name__ == "__main__":
    print("Launching the ChatGPT-Llama...")
    demo.launch()