# LLMpromt333 / app.py
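"""Language-model prompt playground.

A Gradio app that sends a prompt plus a reference text to a selected model,
with adjustable max tokens, temperature, and top-p, and renders the result as HTML.
Hugging Face models are called through InferenceClient (requires the HF_TOKEN
environment variable); the "GPT-4o Mini" option is routed to the OpenAI API
(requires OPENAI_API_KEY).
"""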
import gradio as gr
from huggingface_hub import InferenceClient
import openai  # added for the OpenAI API (GPT-4o Mini)
import os
import random
import logging
# Logging configuration
logging.basicConfig(filename='language_model_playground.log', level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')
# Available models: display name -> model ID
MODELS = {
    "Zephyr 7B Beta": "HuggingFaceH4/zephyr-7b-beta",
    "DeepSeek Coder V2": "deepseek-ai/DeepSeek-Coder-V2-Instruct",
    "Meta Llama 3.1 8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "Meta Llama 3.1 70B": "meta-llama/Meta-Llama-3.1-70B-Instruct",
    "Microsoft Phi-3 Mini 4K": "microsoft/Phi-3-mini-4k-instruct",
    "Mistral 7B Instruct v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
    "Nous Hermes 2 Mixtral 8x7B": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    "Cohere Command R+": "CohereForAI/c4ai-command-r-plus",
    "Aya-23-35B": "CohereForAI/aya-23-35B",
    "GPT-4o Mini": "gpt-4o-mini",  # served through the OpenAI API, not Hugging Face
}
# Hugging Face token
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
    raise ValueError("The HF_TOKEN environment variable is not set.")
# OpenAI API key (only needed for the GPT-4o Mini option)
openai.api_key = os.getenv("OPENAI_API_KEY")
if not openai.api_key:
    logging.warning("OPENAI_API_KEY is not set; the GPT-4o Mini option will not work.")
def call_hf_api(prompt, reference_text, max_tokens, temperature, top_p, model):
    """Generate a response with a Hugging Face model (or defer to OpenAI for gpt-4o-mini)."""
    if model == "gpt-4o-mini":
        return call_openai_api(prompt, reference_text, max_tokens, temperature, top_p)
    client = InferenceClient(model=model, token=hf_token)
    combined_prompt = f"{prompt}\n\nReference text:\n{reference_text}"
    random_seed = random.randint(0, 1000000)  # vary the seed so repeated runs differ
    try:
        response = client.text_generation(
            combined_prompt,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            seed=random_seed
        )
        return response
    except Exception as e:
        logging.error(f"Error while calling the HuggingFace API: {str(e)}")
        return f"Error while generating the response: {str(e)}. Please try again later."
def call_openai_api(prompt, reference_text, max_tokens, temperature, top_p):
    """Generate a response with GPT-4o Mini via the OpenAI ChatCompletion API (openai<1.0 interface)."""
    system_message = "This task generates a response to the user's request using the provided reference text."
    response = openai.ChatCompletion.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": f"{prompt}\n\nReference text:\n{reference_text}"},
        ],
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    return response.choices[0].message['content']
def generate_response(prompt, reference_text, max_tokens, temperature, top_p, model):
    """Route the request to the selected model and wrap the result in scrollable HTML."""
    if model == "GPT-4o Mini":
        response = call_openai_api(prompt, reference_text, max_tokens, temperature, top_p)
    else:
        response = call_hf_api(prompt, reference_text, max_tokens, temperature, top_p, MODELS[model])
    response_html = f"""
    <h3>Generated response:</h3>
    <div style='max-height: 500px; overflow-y: auto; white-space: pre-wrap; word-wrap: break-word;'>
    {response}
    </div>
    """
    return response_html
# Gradio interface setup
with gr.Blocks() as demo:
    gr.Markdown("## Language Model Prompt Playground")
    with gr.Column():
        model_radio = gr.Radio(choices=list(MODELS.keys()), value="Zephyr 7B Beta", label="Select a language model")
        prompt_input = gr.Textbox(label="Prompt", lines=5)
        reference_text_input = gr.Textbox(label="Reference text", lines=5)
        with gr.Row():
            max_tokens_slider = gr.Slider(minimum=100, maximum=5000, value=2000, step=100, label="Max tokens")
            temperature_slider = gr.Slider(minimum=0.05, maximum=1, value=0.75, step=0.05, label="Temperature")
            top_p_slider = gr.Slider(minimum=0, maximum=1, value=0.95, step=0.05, label="Top P")
        generate_button = gr.Button("Generate response")
        response_output = gr.HTML(label="Generated response")
    # Generate a response when the button is clicked
    generate_button.click(
        generate_response,
        inputs=[prompt_input, reference_text_input, max_tokens_slider, temperature_slider, top_p_slider, model_radio],
        outputs=response_output
    )
# Launch the interface
demo.launch(share=True)
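# Assumed dependencies (not pinned in this file): gradio, huggingface_hub, and openai
# (a pre-1.0 release, since openai.ChatCompletion.create above uses the legacy interface).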