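"""Discord bot for a Hugging Face Space.

Loads a legal-case CSV dataset ('train_0.csv'), listens for messages in one
specific channel (and its threads), and replies with the case number of the
first record whose case name contains the user's message. An Inference API
client for CohereForAI/c4ai-command-r-plus is configured but, in this version
of the script, is not used to generate the replies.
"""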
import discord
import logging
import os
from huggingface_hub import InferenceClient
import asyncio
import subprocess
from datasets import load_dataset
# Print the current working directory
print("Current Working Directory:", os.getcwd())

# Dataset file name
data_file = 'train_0.csv'

# Check whether the file exists in the current working directory
if os.path.exists(data_file):
    print(f"File {data_file} exists in the current directory.")
else:
    print(f"File {data_file} does not exist in the current directory.")

# Change the working directory (if necessary)
os.chdir('/home/user/app')
print("Changed directory to:", os.getcwd())

# Load the dataset
law_dataset = load_dataset('csv', data_files=data_file)
print("Dataset loaded successfully.")

# Logging configuration
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(name)s: %(message)s', handlers=[logging.StreamHandler()])
# Discord intents configuration
intents = discord.Intents.default()
intents.message_content = True
intents.messages = True
intents.guilds = True
intents.guild_messages = True
# Inference API client configuration
hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus", token=os.getenv("HF_TOKEN"))
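# NOTE: hf_client is configured above, but in this script the reply text comes
# from the dataset lookup in search_in_dataset() below; the model is never
# queried. A minimal sketch of calling the model (assuming a huggingface_hub
# version that supports chat_completion) could look like the commented lines:
#
#     completion = hf_client.chat_completion(
#         messages=[{"role": "user", "content": "Hello"}],
#         max_tokens=512,
#     )
#     model_reply = completion.choices[0].message.content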
# ID of the specific channel the bot responds in
SPECIFIC_CHANNEL_ID = int(os.getenv("DISCORD_CHANNEL_ID"))

# Global variable that stores the conversation history
conversation_history = []
class MyClient(discord.Client):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_processing = False

    async def on_ready(self):
        logging.info(f'{self.user}로 로그인되었습니다!')  # "Logged in as {self.user}!"
        # Start the companion web.py server alongside the bot
        subprocess.Popen(["python", "web.py"])
        logging.info("Web.py server has been started.")
    async def on_message(self, message):
        if message.author == self.user:
            return
        if not self.is_message_in_specific_channel(message):
            return
        if self.is_processing:
            return
        self.is_processing = True
        try:
            # generate_response builds the reply and sends it to the channel itself
            await generate_response(message)
        finally:
            self.is_processing = False
    def is_message_in_specific_channel(self, message):
        return message.channel.id == SPECIFIC_CHANNEL_ID or (
            isinstance(message.channel, discord.Thread) and message.channel.parent_id == SPECIFIC_CHANNEL_ID
        )
async def generate_response(message):
    global conversation_history
    user_input = message.content
    user_mention = message.author.mention
    system_message = f"{user_mention}, DISCORD에서 사용자들의 질문에 답하는 어시스턴트입니다."  # "...an assistant that answers users' questions on Discord."

    # Search the dataset and prepare the reply
    answer = search_in_dataset(user_input, law_dataset)
    if not answer.strip():
        # Fall back to an apology when there is nothing to send
        answer = "죄송합니다, 제공할 수 있는 정보가 없습니다."  # "Sorry, there is no information I can provide."
    full_response_text = system_message + "\n\n" + answer

    # Send the reply in chunks: Discord limits a single message to 2000 characters
    max_length = 2000
    if len(full_response_text) > max_length:
        # Split an overly long message into several parts
        for i in range(0, len(full_response_text), max_length):
            part_response = full_response_text[i:i + max_length]
            await message.channel.send(part_response)
    else:
        # Short enough to send in a single message
        await message.channel.send(full_response_text)

    logging.debug(f'Full model response sent: {full_response_text}')
    conversation_history.append({"role": "assistant", "content": full_response_text})
def search_in_dataset(query, dataset):
    # Simple lookup, kept deliberately minimal: return the case number
    # ('사건번호') of the first record whose case name ('사건명') contains the query.
    for record in dataset['train']:
        if record['사건명'] and query in record['사건명']:
            return record['사건번호']
    return "관련 법률 정보를 찾을 수 없습니다."  # "No relevant legal information was found."
if __name__ == "__main__":
    discord_client = MyClient(intents=intents)
    discord_client.run(os.getenv('DISCORD_TOKEN'))