Spaces (status: Runtime error)

Commit: Upload 4 files

Files changed:
- README.md +7 -5
- app.py +82 -0
- gitattributes +35 -0
- requirements.txt +2 -0
README.md
CHANGED
@@ -1,13 +1,15 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Prompt Playground
+emoji: 🏢
+colorFrom: yellow
+colorTo: pink
 sdk: gradio
 sdk_version: 4.42.0
 app_file: app.py
-pinned:
+pinned: true
 license: apache-2.0
+short_description: Prompt Playground_Pv
+
 ---

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,82 @@
import gradio as gr
from huggingface_hub import InferenceClient
import os
import random
import logging

# Logging setup
logging.basicConfig(filename='language_model_playground.log', level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')

# Available models (display name -> Hugging Face model ID)
MODELS = {
    "Zephyr 7B Beta": "HuggingFaceH4/zephyr-7b-beta",
    "DeepSeek Coder V2": "deepseek-ai/DeepSeek-Coder-V2-Instruct",
    "Meta Llama 3.1 8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "Meta-Llama 3.1 70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
    "Microsoft Phi-3 Mini 4K": "microsoft/Phi-3-mini-4k-instruct",
    "Mistral 7B Instruct v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
    "Nous Hermes 2 Mixtral 8x7B": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    "Cohere Command R+": "CohereForAI/c4ai-command-r-plus",
    "Aya-23-35B": "CohereForAI/aya-23-35B"
}

# Hugging Face token setup
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
    raise ValueError("The HF_TOKEN environment variable is not set.")

def call_hf_api(prompt, reference_text, max_tokens, temperature, top_p, model):
    client = InferenceClient(model=model, token=hf_token)
    combined_prompt = f"{prompt}\n\nReference text:\n{reference_text}"
    random_seed = random.randint(0, 1000000)

    try:
        response = client.text_generation(
            combined_prompt,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            seed=random_seed
        )
        return response
    except Exception as e:
        logging.error(f"Error while calling the Hugging Face API: {str(e)}")
        return f"An error occurred while generating the response: {str(e)}. Please try again later."

def generate_response(prompt, reference_text, max_tokens, temperature, top_p, model):
    response = call_hf_api(prompt, reference_text, max_tokens, temperature, top_p, MODELS[model])
    response_html = f"""
    <h3>Generated response:</h3>
    <div style='max-height: 500px; overflow-y: auto; white-space: pre-wrap; word-wrap: break-word;'>
    {response}
    </div>
    """
    return response_html

# Gradio interface setup
with gr.Blocks() as demo:
    gr.Markdown("## Language Model Prompt Playground")

    with gr.Column():
        model_radio = gr.Radio(choices=list(MODELS.keys()), value="Zephyr 7B Beta", label="Language model")
        prompt_input = gr.Textbox(label="Prompt", lines=5)
        reference_text_input = gr.Textbox(label="Reference text", lines=5)

        with gr.Row():
            max_tokens_slider = gr.Slider(minimum=0, maximum=5000, value=2000, step=100, label="Max tokens")
            temperature_slider = gr.Slider(minimum=0, maximum=1, value=0.75, step=0.05, label="Temperature")
            top_p_slider = gr.Slider(minimum=0, maximum=1, value=0.95, step=0.05, label="Top P")

        generate_button = gr.Button("Generate response")
        response_output = gr.HTML(label="Generated response")

    # Generate a response when the button is clicked
    generate_button.click(
        generate_response,
        inputs=[prompt_input, reference_text_input, max_tokens_slider, temperature_slider, top_p_slider, model_radio],
        outputs=response_output
    )

# Launch the interface
demo.launch(share=True)
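For reference, the same text-generation request that app.py sends can be exercised without the Gradio UI. The following is a minimal sketch and not part of the commit; the model, prompt, and parameter values are illustrative, and it assumes HF_TOKEN is set in the environment:

import os
from huggingface_hub import InferenceClient

# Illustrative standalone call mirroring call_hf_api() in app.py.
client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta", token=os.environ["HF_TOKEN"])
output = client.text_generation(
    "Summarize the following text:\n\nReference text:\nGradio apps are plain Python scripts.",
    max_new_tokens=200,   # exposed as the "Max tokens" slider in the UI
    temperature=0.75,
    top_p=0.95,
)
print(output)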
gitattributes
ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
requirements.txt
ADDED
@@ -0,0 +1,2 @@
gradio
huggingface_hub
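Both dependencies are left unpinned in the commit. One possible refinement, shown here only as a sketch (the exact version numbers are illustrative and not part of the commit), is to pin them so rebuilds stay reproducible:

gradio==4.42.0            # illustrative pin, matching sdk_version in README.md
huggingface_hub==0.24.6   # illustrative pin; any recent release providing InferenceClient works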