|
import gradio as gr |
|
import random |
|
import os |
|
import pandas as pd |
|
from huggingface_hub import InferenceClient |
|
|
|
|
|
HARD_CODED_MODEL = "CohereForAI/c4ai-command-r-plus" |
|
|
|
def create_client(model_name):
    """Build a Hugging Face ``InferenceClient`` for *model_name*.

    The API token is read from the ``HF_TOKEN`` environment variable;
    when unset, ``os.getenv`` yields ``None`` and the client is created
    without a token.
    """
    hf_token = os.getenv("HF_TOKEN")
    return InferenceClient(model_name, token=hf_token)
|
|
|
def call_api(model_input, system_message, max_tokens, temperature, top_p):
    """Run one chat completion against the hard-coded model.

    Args:
        model_input: User-turn content sent to the model.
        system_message: System-turn content (the prompt/instructions).
        max_tokens: Generation cap passed to ``chat_completion``.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling threshold.

    Returns:
        The assistant message text of the first completion choice.
    """
    inference = create_client(HARD_CODED_MODEL)
    chat = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": model_input},
    ]
    # A fresh random seed on every call so repeated clicks with the same
    # inputs still produce varied completions.
    seed = random.randint(0, 1000000)
    result = inference.chat_completion(
        messages=chat,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        seed=seed,
    )
    return result.choices[0].message.content
|
|
|
|
|
def analyze_positive_reviews(positive_reviews, positive_prompt, max_tokens, temperature, top_p):
    """Analyze the positive-review text with its prompt via :func:`call_api`."""
    analysis = call_api(
        positive_reviews, positive_prompt, max_tokens, temperature, top_p
    )
    return analysis
|
|
|
|
|
def analyze_negative_reviews(negative_reviews, negative_prompt, max_tokens, temperature, top_p):
    """Analyze the negative-review text with its prompt via :func:`call_api`."""
    analysis = call_api(
        negative_reviews, negative_prompt, max_tokens, temperature, top_p
    )
    return analysis
|
|
|
|
|
def process_excel(file):
    """Read an uploaded review spreadsheet and sample reviews from it.

    Adds a character-count column, sorts longest-first, keeps reviews of
    at most 500 characters, then takes the 10 longest positive (score 4
    or 5) and 10 longest negative (score 1 or 2) reviews. Each group is
    rendered as one newline-joined string of
    ``"<date>, <purchase option>, <review text>"`` lines.

    Args:
        file: Gradio file wrapper exposing a ``.name`` path, or ``None``.

    Returns:
        ``(positive_reviews_str, negative_reviews_str)``. When *file* is
        ``None`` both values are the user-facing "upload a file" message.
    """
    # NOTE(review): the Korean column names and messages below are
    # mojibake-damaged in this file (some literals were even split across
    # lines by the damage and have been rejoined here) — verify every
    # string against the real spreadsheet headers before shipping.
    if file is None:
        upload_msg = "์์ํ์ผ์ ์๋ก๋ํ์ธ์."
        return upload_msg, upload_msg

    df = pd.read_excel(file.name)
    print(df.columns)  # debug aid: show which columns are actually present

    # Column names (mojibake preserved byte-for-byte from the original).
    col_length = '๊ธ์์'    # derived character-count column
    col_content = '๋ฆฌ๋ทฐ๋ด์ฉ'  # review text
    col_score = '๋ฆฌ๋ทฐ์ ์'   # review score (1-5)
    col_date = '๋ฆฌ๋ทฐ๋ ์ง'   # review date
    col_option = '๊ตฌ๋งค์ต์'  # purchase option (may be absent)

    df[col_length] = df[col_content].apply(lambda x: len(str(x)))
    df = df.sort_values(by=col_length, ascending=False)

    short_enough = df[col_length] <= 500
    positive_reviews = df[df[col_score].isin([4, 5]) & short_enough].head(10)
    negative_reviews = df[df[col_score].isin([1, 2]) & short_enough].head(10)

    # Hoisted out of the per-row formatter: column presence is invariant.
    has_option = col_option in df.columns

    def _format_row(row):
        # Blank option when the column is missing or the cell is NaN.
        option = row[col_option] if has_option and pd.notna(row[col_option]) else ''
        return f"{row[col_date]}, {option}, {row[col_content]}"

    positive_reviews_str = '\n'.join(positive_reviews.apply(_format_row, axis=1))
    negative_reviews_str = '\n'.join(negative_reviews.apply(_format_row, axis=1))
    return positive_reviews_str, negative_reviews_str
|
|
|
# ---------------------------------------------------------------------------
# Gradio UI (top-level script).
# NOTE(review): the Korean labels below are mojibake-damaged in this file;
# the title and upload labels were additionally split across lines by the
# damage and have been rejoined here. Verify the intended Korean text.
# ---------------------------------------------------------------------------
title = "AI ํ์คํธ ์์ฑ๊ธฐ"

with gr.Blocks() as demo:
    gr.Markdown(f"# {title}")

    # Uploading an Excel file fills the two review textboxes below.
    upload_excel = gr.File(label="์์ํ์ผ ์๋ก๋")

    user_message = gr.Textbox(label="๊ธ์ ๋ฆฌ๋ทฐ 10๊ฐ", lines=5)   # positive reviews
    input1 = gr.Textbox(label="๋ถ์ ๋ฆฌ๋ทฐ 10๊ฐ", lines=5)        # negative reviews
    system_message = gr.Textbox(label="๊ธ์ ํ๋กฌํํธ", lines=10)  # positive prompt
    input2 = gr.Textbox(label="๋ถ์ ํ๋กฌํํธ", lines=10)         # negative prompt

    with gr.Accordion("๊ณ ๊ธ ์ค์ ", open=False):
        max_tokens = gr.Slider(label="Max Tokens", minimum=0, maximum=4000, value=500, step=100)
        temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, value=0.75, step=0.05)
        top_p = gr.Slider(label="Top P", minimum=0.1, maximum=1.0, value=0.95, step=0.05)

    generate_btn = gr.Button("์คํ")

    output1 = gr.Textbox(label="๊ธ์ ๋ฆฌ๋ทฐ๋ถ์", lines=10)  # positive analysis
    output2 = gr.Textbox(label="๋ถ์ ๋ฆฌ๋ทฐ๋ถ์", lines=10)  # negative analysis

    upload_excel.upload(fn=process_excel,
                        inputs=upload_excel,
                        outputs=[user_message, input1])

    # One click fires both handlers; Gradio dispatches them independently.
    generate_btn.click(fn=analyze_positive_reviews,
                       inputs=[user_message, system_message, max_tokens, temperature, top_p],
                       outputs=[output1])
    generate_btn.click(fn=analyze_negative_reviews,
                       inputs=[input1, input2, max_tokens, temperature, top_p],
                       outputs=[output2])

demo.launch()
|
|