Spaces:
Running
Running
seawolf2357
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -2,8 +2,10 @@ import discord
|
|
2 |
import logging
|
3 |
import os
|
4 |
from huggingface_hub import InferenceClient
|
|
|
5 |
import asyncio
|
6 |
import subprocess
|
|
|
7 |
|
8 |
# ๋ก๊น
์ค์
|
9 |
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(name)s: %(message)s', handlers=[logging.StreamHandler()])
|
@@ -17,7 +19,9 @@ intents.guild_messages = True
|
|
17 |
|
18 |
# ์ถ๋ก API ํด๋ผ์ด์ธํธ ์ค์
|
19 |
hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus", token=os.getenv("HF_TOKEN"))
|
20 |
-
|
|
|
|
|
21 |
|
22 |
# ํน์ ์ฑ๋ ID
|
23 |
SPECIFIC_CHANNEL_ID = int(os.getenv("DISCORD_CHANNEL_ID"))
|
@@ -35,7 +39,6 @@ class MyClient(discord.Client):
|
|
35 |
subprocess.Popen(["python", "web.py"])
|
36 |
logging.info("Web.py server has been started.")
|
37 |
|
38 |
-
|
39 |
async def on_message(self, message):
|
40 |
if message.author == self.user:
|
41 |
return
|
@@ -43,64 +46,49 @@ class MyClient(discord.Client):
|
|
43 |
return
|
44 |
if self.is_processing:
|
45 |
return
|
|
|
46 |
self.is_processing = True
|
47 |
try:
|
48 |
-
|
49 |
-
|
|
|
|
|
|
|
50 |
finally:
|
51 |
self.is_processing = False
|
52 |
|
53 |
def is_message_in_specific_channel(self, message):
|
54 |
-
# ๋ฉ์์ง๊ฐ ์ง์ ๋ ์ฑ๋์ด๊ฑฐ๋, ํด๋น ์ฑ๋์ ์ฐ๋ ๋์ธ ๊ฒฝ์ฐ True ๋ฐํ
|
55 |
return message.channel.id == SPECIFIC_CHANNEL_ID or (
|
56 |
isinstance(message.channel, discord.Thread) and message.channel.parent_id == SPECIFIC_CHANNEL_ID
|
57 |
)
|
58 |
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
logging.debug(f'Part received from stream: {part}')
|
87 |
-
if part.choices and part.choices[0].delta and part.choices[0].delta.content:
|
88 |
-
full_response.append(part.choices[0].delta.content)
|
89 |
-
|
90 |
-
full_response_text = ''.join(full_response)
|
91 |
-
logging.debug(f'Full model response: {full_response_text}')
|
92 |
-
|
93 |
-
conversation_history.append({"role": "assistant", "content": full_response_text})
|
94 |
-
return f"{user_mention}, {full_response_text}"
|
95 |
-
|
96 |
-
async def send_long_message(channel, message):
|
97 |
-
"""Discord ๋ฉ์์ง ๊ธธ์ด๊ฐ 2000์๋ฅผ ์ด๊ณผํ๋ ๊ฒฝ์ฐ, ์ด๋ฅผ ๋๋์ด ๋ณด๋
๋๋ค."""
|
98 |
-
if len(message) <= 2000:
|
99 |
-
await channel.send(message)
|
100 |
-
else:
|
101 |
-
parts = [message[i:i+2000] for i in range(0, len(message), 2000)]
|
102 |
-
for part in parts:
|
103 |
-
await channel.send(part)
|
104 |
|
105 |
if __name__ == "__main__":
|
106 |
discord_client = MyClient(intents=intents)
|
|
|
2 |
import logging
|
3 |
import os
|
4 |
from huggingface_hub import InferenceClient
|
5 |
+
from transformers import pipeline
|
6 |
import asyncio
|
7 |
import subprocess
|
8 |
+
import re
|
9 |
|
10 |
# Logging setup (original Korean comment, mojibake'd in scrape: "로깅 설정")
|
11 |
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(name)s: %(message)s', handlers=[logging.StreamHandler()])
|
|
|
19 |
|
20 |
# Inference API client setup (original Korean comment: "추론 API 클라이언트 설정")
|
21 |
hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus", token=os.getenv("HF_TOKEN"))
|
22 |
+
|
23 |
+
# Math-specialist LLM pipeline setup (original Korean comment: "수학 전문 LLM 파이프라인 설정")
|
24 |
+
math_pipe = pipeline("text-generation", model="AI-MO/NuminaMath-7B-TIR")
|
25 |
|
26 |
# Target channel ID (original Korean comment: "특정 채널 ID")
|
27 |
SPECIFIC_CHANNEL_ID = int(os.getenv("DISCORD_CHANNEL_ID"))
|
|
|
39 |
subprocess.Popen(["python", "web.py"])
|
40 |
logging.info("Web.py server has been started.")
|
41 |
|
|
|
42 |
async def on_message(self, message):
|
43 |
if message.author == self.user:
|
44 |
return
|
|
|
46 |
return
|
47 |
if self.is_processing:
|
48 |
return
|
49 |
+
|
50 |
self.is_processing = True
|
51 |
try:
|
52 |
+
if self.is_math_question(message.content):
|
53 |
+
response = await self.handle_math_question(message.content)
|
54 |
+
else:
|
55 |
+
response = await self.generate_response(message)
|
56 |
+
await self.send_long_message(message.channel, response)
|
57 |
finally:
|
58 |
self.is_processing = False
|
59 |
|
60 |
def is_message_in_specific_channel(self, message):
    """Return True when the message is in the target channel, or in a thread whose parent is the target channel."""
    channel = message.channel
    in_target_channel = channel.id == SPECIFIC_CHANNEL_ID
    in_target_thread = (
        isinstance(channel, discord.Thread)
        and channel.parent_id == SPECIFIC_CHANNEL_ID
    )
    return in_target_channel or in_target_thread
|
64 |
|
65 |
+
def is_math_question(self, content):
    """Heuristically classify `content` as a math question by keyword.

    Matches the whole words solve / equation / calculate / math,
    case-insensitively. Returns a bool.
    """
    keyword_pattern = r'\b(solve|equation|calculate|math)\b'
    match = re.search(keyword_pattern, content, re.IGNORECASE)
    return match is not None
|
67 |
+
|
68 |
+
async def handle_math_question(self, question):
    """Answer a math question with the dedicated math LLM pipeline.

    Runs the blocking `math_pipe` call in the default thread executor so
    the event loop stays responsive while the model generates.
    """
    # get_running_loop() is the documented call from inside a coroutine;
    # get_event_loop() is deprecated for this use since Python 3.10.
    loop = asyncio.get_running_loop()
    response = await loop.run_in_executor(
        None, lambda: math_pipe([{"role": "user", "content": question}])
    )
    # NOTE(review): with chat-style (list-of-messages) input, transformers
    # text-generation pipelines may put the full message list under
    # 'generated_text' rather than a plain string — verify the shape.
    return response[0]['generated_text']
|
72 |
+
|
73 |
+
async def generate_response(self, message):
    """Generate a chat reply for `message` via the Hugging Face inference client.

    Appends the user turn to the shared `conversation_history`, streams a
    completion from `hf_client` in the default thread executor, records the
    assistant turn, and returns the reply prefixed with the user's mention.
    """
    global conversation_history
    user_input = message.content
    user_mention = message.author.mention
    conversation_history.append({"role": "user", "content": user_input})
    # System prompt (Korean, mojibake'd in this scrape) plus the full running history.
    messages = [{"role": "system", "content": "ํ๊ธ๋ก ๋ต๋ณํ์ญ์์ค."}] + conversation_history
    # get_running_loop() is the correct call inside a coroutine (get_event_loop()
    # is deprecated here); the blocking, streaming chat_completion call is pushed
    # to the default thread executor so the event loop is not blocked.
    response = await asyncio.get_running_loop().run_in_executor(None, lambda: hf_client.chat_completion(
        messages, max_tokens=1000, stream=True, temperature=0.7, top_p=0.85))
    # Concatenate only non-empty delta chunks from the stream.
    full_response = ''.join([part.choices[0].delta.content for part in response if part.choices and part.choices[0].delta and part.choices[0].delta.content])
    conversation_history.append({"role": "assistant", "content": full_response})
    return f"{user_mention}, {full_response}"
|
84 |
+
|
85 |
+
async def send_long_message(self, channel, message):
    """Send `message` to `channel`, splitting into chunks of at most 2000
    characters to respect Discord's message-length limit."""
    limit = 2000
    if len(message) <= limit:
        await channel.send(message)
        return
    for start in range(0, len(message), limit):
        await channel.send(message[start:start + limit])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
92 |
|
93 |
if __name__ == "__main__":
|
94 |
discord_client = MyClient(intents=intents)
|