import requests
import json
import ast


def prompt_generator(question: str, db_knn: dict) -> tuple[str, str, str]:
    """
    Builds the LLM prompt from the user question and the KNN matches,
    and collects a GitHub reference link for each match.
    rtype: (prompt, context, references)
    """
    context = ""
    references = ""
    for match in db_knn['matches']:
        data = match['metadata']['data']
        context += (data + "\n")
        data = ast.literal_eval(data)

        line_number = ""
        if data['type'] in ("function", "class"):
            line_number = f"#L{data['lineno'][0]}-L{data['lineno'][1]}"

        references += ("<https://github.com/fury-gl/fury/blob/" + data['path'] + line_number + ">").replace("fury-0.10.0", "v0.10.0")

        if data.get("function_name"):
            references += f"\tFunction Name: {data.get('function_name')}"
        elif data.get("class_name"):
            references += f"\tClass Name: {data.get('class_name')}"
        elif data['type'] in ("rst", "documentation_examples"):
            filename = data['path'].split('/')[-1]
            references += f"\tDocumentation: {filename}"
        references += "\n"

    prompt = f"""
    You are a senior developer. Answer the user's question based on the context provided.
    Question: {question}
    Context: {context}
    """
    return prompt, context, references


def groq_llm_output(question: str, db_knn: dict, llm: str, stream: bool) -> tuple[str, str]:
    """
    Returns output from the Groq LLM endpoint using the given user question and retrieved context.
    """

    URL_LLM = 'https://robinroy03-fury-bot.hf.space'
    prompt, context, references = prompt_generator(question, db_knn)
    obj = {
            'model': llm,
            'prompt': prompt,
            'stream': stream
        }
    response = requests.post(URL_LLM + "/api/groq/generate", json=obj)
    response_json = json.loads(response.text)
    return (response_json['choices'][0]['message']['content'], references)


def google_llm_output(question: str, db_knn: dict, llm: str, stream: bool) -> tuple[str, str]:
    """
    Returns output from the Google LLM endpoint using the given user question and retrieved context.
    """
    URL_LLM = 'https://robinroy03-fury-bot.hf.space'
    prompt, context, references = prompt_generator(question, db_knn)
    obj = {
            'model': llm,
            'prompt': prompt,
            'stream': stream
        }
    response = requests.post(URL_LLM + "/api/google/generate", json=obj)
    response_json = json.loads(response.text)
    return (response_json['candidates'][0]['content']['parts'][0]['text'], references)


def embedding_output(message: str) -> list:
    """
    Returns embeddings for the given message
    rtype: list of embeddings. Length depends on the model.
    """

    URL_EMBEDDING = 'https://robinroy03-fury-embeddings-endpoint.hf.space'
    response = requests.post(URL_EMBEDDING + "/embedding", json={"text": message})
    response_json = json.loads(response.text)
    return response_json['output']


def db_output(embedding: list, knn: int) -> dict:
    """
    Returns the KNN results.
    rtype: JSON
    """

    URL_DB = 'https://robinroy03-fury-db-endpoint.hf.space'
    response = requests.post(URL_DB + "/query", json={"embeddings": embedding, "knn": knn})
    response_json = json.loads(response.text)
    return response_json


def ollama_llm_output(question: str, db_knn: dict, llm: str, stream: bool) -> tuple[dict, str]:
    """
    Returns output from the Ollama endpoint using the given user question and retrieved context.
    rtype: (response JSON, references)
    """
    URL_LLM = 'https://robinroy03-ollama-server-backend.hf.space'
    # URL_LLM = "http://localhost:11434"
    prompt, context, references = prompt_generator(question, db_knn)
    obj = {
        "model": llm,
        "prompt": prompt,  # send the RAG prompt, as the other backends do
        "stream": stream
    }
    try:
        response = requests.post(URL_LLM + "/api/generate", json=obj)
    except requests.RequestException as e:
        print(e)
        return {"error": str(e)}, references

    response_json = json.loads(response.text)
    return response_json, references
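

# A minimal end-to-end sketch of how these helpers compose into the RAG
# pipeline: embed the question, fetch the nearest neighbours from the DB,
# then ask an LLM to answer with that context. The model name
# "llama3-70b-8192" and knn=3 are illustrative assumptions, not values
# fixed anywhere in this module.
if __name__ == "__main__":
    question = "How do I render a sphere in FURY?"
    embedding = embedding_output(question)       # question -> embedding vector
    db_knn = db_output(embedding, knn=3)         # embedding -> nearest matches
    answer, references = groq_llm_output(question, db_knn,
                                         llm="llama3-70b-8192", stream=False)
    print(answer)
    print(references)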