# FURY Discord bot: answers user questions with retrieval-augmented LLM
# responses (embedding -> vector-DB KNN -> LLM), plus a Flask keep-alive server.
import discord | |
import aiohttp | |
import ast | |
import os | |
import threading | |
# Enable the privileged message-content intent so on_message can read user text.
intents = discord.Intents.default()
intents.message_content = True
bot = discord.Bot(intents = intents)
# Bot token is read from the environment; never hard-code credentials.
token = os.environ.get('TOKEN_DISCORD')
class Like_Dislike(discord.ui.View):
    """
    Feedback view attached to the bot's reference reply: one like and one
    dislike button, each acknowledging the click with an ephemeral-style reply.
    """

    # NOTE(review): the @discord.ui.button decorators were missing, so the
    # view rendered no buttons at all. Labels/styles below are a reasonable
    # reconstruction — confirm against the deployed bot's appearance.
    @discord.ui.button(label="Like", style=discord.ButtonStyle.green)
    async def like_button(self, button, interaction):
        """Acknowledge a 'like' click."""
        await interaction.response.send_message("You liked the response")

    @discord.ui.button(label="Dislike", style=discord.ButtonStyle.red)
    async def dislike_button(self, button, interaction):
        """Acknowledge a 'dislike' click."""
        await interaction.response.send_message("You disliked the response")
@bot.event  # was missing: without registration this handler never fires
async def on_ready():
    """Log to stdout once the gateway connection is established."""
    print(f"{bot.user} is ready and online!")
# NOTE(review): the command decorator was missing; the ApplicationContext
# parameter implies this was registered as a slash command — confirm the
# name/description against the deployed bot.
@bot.slash_command(name="help", description="How to interact with FURY Bot.")
async def help(ctx: discord.ApplicationContext):
    """Slash command describing where and how the bot responds."""
    await ctx.respond("Hello! FURY Bot responds to all your messages\
\n1)Inside Forum channel and\
\n2)Those that tag the bot.")
async def llm_output(question: str, context: str) -> str:
    """
    Ask the hosted LLM endpoint to answer `question` using the retrieved
    `context`, and return the model's reply text.

    Returns a canned apology string when the endpoint reports HTTP 500.
    """
    llm_url = 'https://robinroy03-fury-bot.hf.space'
    # llm_url = 'http://localhost:11434'  # NOTE: FOR TESTING
    prompt = f"""
    You are a senior FURY developer. FURY is a high level python graphics API similar to VTK.
    Question: {question}
    Context: {context}
    """
    payload = {
        "model": "llama3-70b-8192",
        "prompt": prompt,
        "stream": False,
    }
    async with aiohttp.ClientSession() as http:
        async with http.post(llm_url + "/api/generate", json=payload) as resp:
            if resp.status == 500:
                return "Sorry, an internal Error happened. Please try again later.\nError 500."
            body = await resp.json()
    # OpenAI-style response shape: first choice's message content.
    return body['choices'][0]['message']['content']
async def embedding_output(message: str) -> list:
    """
    Fetch the embedding vector for `message` from the hosted embedding endpoint.

    rtype: list of floats; length depends on the embedding model.
    """
    embed_url = 'https://robinroy03-fury-embeddings-endpoint.hf.space'
    async with aiohttp.ClientSession() as http:
        async with http.post(embed_url + "/embedding", json={"text": message}) as resp:
            # content_type=None: endpoint may not set an application/json header.
            body = await resp.json(content_type=None)
    return body['output']
async def db_output(embedding: list) -> dict:
    """
    Run a KNN similarity query against the hosted vector-DB endpoint.

    rtype: dict — the raw JSON query response (expected to contain 'matches').
    """
    db_url = 'https://robinroy03-fury-db-endpoint.hf.space'
    async with aiohttp.ClientSession() as http:
        async with http.post(db_url + "/query", json={"embeddings": embedding}) as resp:
            return await resp.json()
@bot.event  # was missing: without registration this handler never fires
async def on_message(message):
    """
    Reply to messages that mention the bot with an LLM answer grounded in
    retrieved FURY context, followed by a list of source references.
    """
    # Ignore our own messages and anything that doesn't mention the bot.
    if (message.author == bot.user) or not(bot.user.mentioned_in(message)):
        return
    print(message.content)
    await message.reply(content="Your message was received, it'll take around 30 seconds for FURY to process an answer.")
    # Strip the bot's own mention from the question. (Was a hard-coded user
    # id, which silently breaks if the bot account ever changes.)
    question = message.content.replace(f"<@{bot.user.id}>", "")
    embedding: list = await embedding_output(question)
    db_knn: dict = await db_output(embedding)
    db_context = ""
    references = ""
    for match in db_knn['matches']:
        data = match['metadata']['data']
        db_context += (data + "\n")
        data = ast.literal_eval(data)
        references += ("<https://github.com/fury-gl/fury/tree/master/" + data['path'] + ">").replace("//home/robin/Desktop/l/fury", "")
        if data.get("function_name"):
            references += f"\tFunction Name: {data.get('function_name')}"
        elif data.get("class_name"):
            references += f"\tClass Name: {data.get('class_name')}"
        elif data['type'] in ('rst', 'documentation_examples'):
            # Single-quoted split('/') keeps this valid on Python < 3.12
            # (reusing the outer quote inside an f-string needs PEP 701 / 3.12+).
            references += f"\tDocumentation: {data['path'].split('/')[-1]}"
        references += "\n"
    llm_answer: str = await llm_output(question, db_context)
    try:
        # Discord caps messages at 2000 chars; send the answer in 1990-char
        # chunks. (The old loop had two identical if/else branches.)
        chunk_size = 1990
        for start in range(0, len(llm_answer), chunk_size):
            await message.reply(content=llm_answer[start: start + chunk_size])
        await message.reply(content=f"**References**\n{references}", view=Like_Dislike())
    except Exception as e:  # TODO: make exception handling better
        print(e)
        await message.reply("An error occurred. Retry again.")
def run_bot():
    """Blocking entry point for the Discord bot; run on a background thread."""
    # bot.run() blocks forever, which is why it gets its own thread below.
    bot.run(token)
# =========================================================================================================================================================== | |
# Minimal web server so hosting platforms with an HTTP health check
# (e.g. Hugging Face Spaces) keep the process alive.
from flask import Flask
app = Flask(__name__)
@app.route("/")  # was missing: without a route Flask serves nothing
def home():
    """Health-check endpoint reporting that the service is up."""
    return "The bot is online."
# Run the (blocking) Discord bot on a background thread so Flask can own
# the main thread and answer health-check pings.
threading.Thread(target=run_bot).start()
app.run()