import discord
import logging
import os
from huggingface_hub import InferenceClient
import asyncio
import subprocess
from datasets import load_dataset
import pandas as pd
from fuzzywuzzy import process
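# Note: fuzzywuzzy falls back to a slow pure-Python matcher unless the optional
# python-Levenshtein package is installed; installing it (an assumption about this
# environment, not something specified here) speeds up the process.extractBests
# calls used below.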
# Print the current working directory
print("Current Working Directory:", os.getcwd())

# Dataset file names
data_files = ['train_0.csv', 'train_1.csv', 'train_2.csv', 'train_3.csv', 'train_4.csv', 'train_5.csv']

# Check whether all files are present in the current working directory
missing_files = [file for file in data_files if not os.path.exists(file)]
if missing_files:
    print(f"Missing files: {missing_files}")
    # Change the working directory if necessary
    os.chdir('/home/user/app')
    print("Changed directory to:", os.getcwd())
else:
    print("All files are present in the current directory.")
# Load the dataset and build lookup tables
def load_optimized_dataset(data_files):
    data_frames = [pd.read_csv(file) for file in data_files]
    full_data = pd.concat(data_frames, ignore_index=True)

    # Handle NaN values
    full_data['판시사항'] = full_data['판시사항'].fillna('')
    full_data['사건명'] = full_data['사건명'].fillna('')

    # Build dictionaries keyed by case name (사건명) and by holdings (판시사항)
    # that store the matching case numbers (사건번호), plus one keyed by case
    # number that stores the full text (전문)
    name_to_number = full_data.groupby('사건명')['사건번호'].apply(list).to_dict()
    summary_to_number = full_data.groupby('판시사항')['사건번호'].apply(list).to_dict()
    number_to_fulltext = full_data.set_index('사건번호')['전문'].to_dict()

    return name_to_number, summary_to_number, number_to_fulltext

name_to_number, summary_to_number, number_to_fulltext = load_optimized_dataset(data_files)
print("Dataset loaded successfully.")
# Build the lists of case names and holdings used for fuzzy matching
all_case_names = list(name_to_number.keys())
all_case_summaries = list(summary_to_number.keys())

# Logging configuration (set up before the debug calls below so they are actually emitted)
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(name)s: %(message)s', handlers=[logging.StreamHandler()])

# Debug logging
logging.debug(f"Sample all_case_names: {all_case_names[:3]}")
logging.debug(f"Sample all_case_summaries: {all_case_summaries[:3]}")
# Intent configuration
intents = discord.Intents.default()
intents.message_content = True
intents.messages = True
intents.guilds = True
intents.guild_messages = True

# Inference API client setup (included as an example; not used below)
hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus-08-2024", token=os.getenv("HF_TOKEN"))

# ID of the specific channel the bot should respond in
SPECIFIC_CHANNEL_ID = int(os.getenv("DISCORD_CHANNEL_ID"))

# Global variable reserved for conversation history (currently unused)
conversation_history = []
class MyClient(discord.Client):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_processing = False

    async def on_ready(self):
        logging.info(f'Logged in as {self.user}!')
        subprocess.Popen(["python", "web.py"])
        logging.info("Web.py server has been started.")

    async def on_message(self, message):
        if message.author == self.user:
            return
        if not self.is_message_in_specific_channel(message):
            return
        if self.is_processing:
            logging.debug("Currently processing another message, skipping this one.")
            return
        self.is_processing = True
        try:
            response_parts = await generate_response(message)
            if response_parts:
                for part in response_parts:
                    await message.channel.send(part)
            else:
                await message.channel.send("Sorry, there is no information I can provide.")
        finally:
            self.is_processing = False
            logging.debug("Message processing completed, ready for the next one.")

    def is_message_in_specific_channel(self, message):
        channel_condition = message.channel.id == SPECIFIC_CHANNEL_ID
        thread_condition = isinstance(message.channel, discord.Thread) and message.channel.parent_id == SPECIFIC_CHANNEL_ID
        return channel_condition or thread_condition
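# Note: is_processing acts as a simple one-message-at-a-time guard; messages that
# arrive while a reply is being prepared are skipped rather than queued.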
async def generate_response(message):
    global conversation_history
    user_input = message.content.strip()
    user_mention = message.author.mention

    # Find similar case names and holdings, respectively
    matched_case_names = process.extractBests(user_input, all_case_names, limit=3, score_cutoff=70)
    matched_case_summaries = process.extractBests(user_input, all_case_summaries, limit=3, score_cutoff=70)

    logging.debug(f"Matched case names: {matched_case_names}")
    logging.debug(f"Matched case summaries: {matched_case_summaries}")

    case_numbers_set = set()
    if matched_case_names:
        for case_name, score in matched_case_names:
            case_numbers_set.update(name_to_number.get(case_name, []))
    if matched_case_summaries:
        for case_summary, score in matched_case_summaries:
            case_numbers_set.update(summary_to_number.get(case_summary, []))

    if case_numbers_set:
        # Cast to str before joining in case the CSV parsed case numbers as non-strings
        case_numbers_str = "\n".join(str(num) for num in case_numbers_set)
        system_message = f"{user_mention}, the case numbers of cases similar to '{user_input}' are:\n{case_numbers_str}"
    elif user_input in number_to_fulltext:
        full_text = number_to_fulltext[user_input]
        system_message = f"{user_mention}, the full text of case number '{user_input}' is:\n\n{full_text}"
    else:
        system_message = f"{user_mention}, no related legal information could be found."

    # Split the reply to respect Discord's 2000-character message length limit
    max_length = 2000
    response_parts = []
    for i in range(0, len(system_message), max_length):
        part_response = system_message[i:i + max_length]
        response_parts.append(part_response)

    return response_parts
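# Minimal sketch of the fuzzy lookup used above (example values are hypothetical):
# process.extractBests(query, choices, limit=3, score_cutoff=70) returns at most
# `limit` (choice, score) tuples whose similarity score is >= score_cutoff, e.g.
#   process.extractBests(user_input, all_case_names, limit=3, score_cutoff=70)
#   -> [('<closest case name>', 90), ('<second closest>', 86)]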
if __name__ == "__main__":
    discord_client = MyClient(intents=intents)
    discord_client.run(os.getenv('DISCORD_TOKEN'))
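# Runtime expectations based on the code above: DISCORD_TOKEN (bot token),
# DISCORD_CHANNEL_ID (numeric ID of the channel the bot listens in), and HF_TOKEN
# (only needed for the example InferenceClient) must be set as environment
# variables; the train_*.csv files must exist in the working directory or in
# /home/user/app; and web.py is launched as a separate process on startup.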