import requests
import json
import ast
def prompt_generator(question: str, db_knn: dict) -> tuple[str, str, str]:
    """
    Builds the LLM prompt, the raw retrieved context, and a reference list
    from the KNN matches returned by the database endpoint.
    """
    context = ""
    references = ""
    for i in range(len(db_knn['matches'])):
        data = db_knn['matches'][i]['metadata']['data']
        context += (data + "\n")
        data = ast.literal_eval(data)  # 'data' is a stringified Python dict
        line_number = ""
        if data['type'] == "function" or data['type'] == "class":
            line_number = f"#L{data['lineno'][0]}-L{data['lineno'][1]}"
        references += ("<https://github.com/fury-gl/fury/blob/" + data['path'] + line_number + ">").replace("fury-0.10.0", "v0.10.0")
        if data.get("function_name"):
            references += f"\tFunction Name: {data.get('function_name')}"
        elif data.get("class_name"):
            references += f"\tClass Name: {data.get('class_name')}"
        elif data['type'] == 'rst':
            references += f"\tDocumentation: {data['path'].split('/')[-1]}"
        elif data['type'] == 'documentation_examples':
            references += f"\tDocumentation: {data['path'].split('/')[-1]}"
        references += "\n"

    prompt = f"""
    You are a senior developer. Answer the user's question based on the context provided.
    Question: {question}
    Context: {context}
    """
    return prompt, context, references
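
# Illustrative (assumed) shape of the db_knn payload consumed above. Each match's
# 'data' field is a stringified Python dict, which is why ast.literal_eval is used.
# The field values here are hypothetical, for illustration only:
#
# db_knn = {
#     "matches": [
#         {"metadata": {"data": "{'type': 'function', 'path': 'fury-0.10.0/fury/actor.py', 'lineno': (100, 150), 'function_name': 'sphere'}"}},
#         {"metadata": {"data": "{'type': 'rst', 'path': 'fury-0.10.0/docs/source/getting_started.rst'}"}},
#     ]
# }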
def groq_llm_output(question: str, db_knn: dict, llm: str, stream: bool) -> tuple[str, str]:
    """
    Returns output from the LLM using the given user question and retrieved context.
    """
    URL_LLM = 'https://robinroy03-fury-bot.hf.space'
    prompt, _, references = prompt_generator(question, db_knn)
    obj = {
        'model': llm,
        'prompt': prompt,
        'stream': stream
    }
    response = requests.post(URL_LLM + "/api/groq/generate", json=obj)
    response_json = json.loads(response.text)
    return (response_json['choices'][0]['message']['content'], references)
def google_llm_output(question: str, db_knn: dict, llm: str, stream: bool) -> tuple[str, str]:
    """
    Returns output from the LLM using the Google backend endpoint.
    """
    URL_LLM = 'https://robinroy03-fury-bot.hf.space'
    prompt, _, references = prompt_generator(question, db_knn)
    obj = {
        'model': llm,
        'prompt': prompt,
        'stream': stream
    }
    response = requests.post(URL_LLM + "/api/google/generate", json=obj)
    response_json = json.loads(response.text)
    return (response_json['candidates'][0]['content']['parts'][0]['text'], references)
def embedding_output(message: str) -> list:
    """
    Returns embeddings for the given message.
    rtype: list of embeddings. Length depends on the model.
    """
    URL_EMBEDDING = 'https://robinroy03-fury-embeddings-endpoint.hf.space'
    response = requests.post(URL_EMBEDDING + "/embedding", json={"text": message})
    response_json = json.loads(response.text)
    return response_json['output']
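
# e.g. embedding_output("How do I render a sphere?") would presumably return a
# single embedding vector such as [0.012, -0.034, ...]; the exact dimensionality
# is an assumption here and depends on the model served by the endpoint.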
def db_output(embedding: list, knn: int) -> dict:
    """
    Returns the KNN results.
    rtype: dict (parsed JSON response)
    """
    URL_DB = 'https://robinroy03-fury-db-endpoint.hf.space'
    response = requests.post(URL_DB + "/query", json={"embeddings": embedding, "knn": knn})
    response_json = json.loads(response.text)
    return response_json
def ollama_llm_output(question: str, db_knn: dict, llm: str, stream: bool) -> tuple[dict, str] | dict:
    """
    Returns the raw Ollama response JSON along with the references,
    or an error dict if the request fails.
    """
    URL_LLM = 'https://robinroy03-ollama-server-backend.hf.space'
    # URL_LLM = "http://localhost:11434"
    prompt, _, references = prompt_generator(question, db_knn)
    obj = {
        "model": llm,
        "prompt": prompt,  # send the full RAG prompt, not just the bare question
        "stream": stream
    }
    try:
        response = requests.post(URL_LLM + "/api/generate", json=obj)
    except requests.RequestException as e:
        print(e)
        return {"error": str(e)}
    response_json = json.loads(response.text)
    return response_json, references
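
# Minimal end-to-end sketch of how these helpers compose into the RAG pipeline:
# embed the question, retrieve the nearest chunks, then generate a grounded answer.
# The question text and model name below are assumptions for illustration;
# substitute whichever model the Groq endpoint actually serves.
if __name__ == "__main__":
    question = "How do I create a sphere actor in FURY?"  # hypothetical question
    embedding = embedding_output(question)   # 1. embed the question
    db_knn = db_output(embedding, knn=3)     # 2. retrieve the 3 nearest chunks
    answer, references = groq_llm_output(
        question, db_knn,
        llm="llama3-70b-8192",               # assumed model name
        stream=False
    )                                        # 3. generate the answer
    print(answer)
    print(references)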