Spaces:
Sleeping
Sleeping
robinroy03
committed on
Commit
·
a0aed2c
1
Parent(s):
207726a
new Google Gemini support added
Browse files
app.py
CHANGED
@@ -21,7 +21,7 @@ RESPONSE FORMAT:
|
|
21 |
from flask import Flask
|
22 |
from flask import request
|
23 |
|
24 |
-
from utils import embedding_output, db_output, groq_llm_output, ollama_llm_output
|
25 |
|
26 |
|
27 |
app = Flask(__name__)
|
@@ -70,6 +70,26 @@ def ollama_completion():
|
|
70 |
"references": references
|
71 |
}
|
72 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
73 |
"""
|
74 |
curl -X POST http://localhost:8000/api/groq/generate -H "Content-Type: application/json" -d '{
|
75 |
"query": "How do I create a sphere in FURY?",
|
|
|
21 |
from flask import Flask
|
22 |
from flask import request
|
23 |
|
24 |
+
from utils import embedding_output, db_output, groq_llm_output, ollama_llm_output, google_llm_output
|
25 |
|
26 |
|
27 |
app = Flask(__name__)
|
|
|
70 |
"references": references
|
71 |
}
|
72 |
|
73 |
+
|
74 |
+
@app.route("/api/google/generate", methods=['POST'])
def google_completion():
    """
    Completion endpoint backed by Google Gemini.

    Expects a JSON body with:
        query  (str):  the user question.
        llm    (str):  the Gemini model name to forward upstream.
        knn    (int):  number of nearest-neighbour context chunks to retrieve.
        stream (bool): whether the upstream LLM call should stream.

    Returns a dict (serialized by Flask as JSON) with the model's answer
    under "response" and the retrieved context references under "references".
    """
    message = request.get_json()

    query: str = message['query']
    llm: str = message['llm']
    knn: int = int(message['knn'])
    stream: bool = bool(message['stream'])

    # Embed the query, fetch the k nearest context chunks, then ask Gemini.
    embedding_data = embedding_output(query)
    db_knn = db_output(embedding_data, knn)
    # google_llm_output already extracts the answer text out of the raw
    # Gemini payload, so its first return value is a plain string. The
    # original code indexed it with ['response'], which raises TypeError
    # (string indices must be integers) on every request.
    answer_text, references = google_llm_output(query, db_knn, llm, stream)

    return {
        "response": answer_text,
        "references": references
    }
|
91 |
+
|
92 |
+
|
93 |
"""
|
94 |
curl -X POST http://localhost:8000/api/groq/generate -H "Content-Type: application/json" -d '{
|
95 |
"query": "How do I create a sphere in FURY?",
|
utils.py
CHANGED
@@ -41,11 +41,24 @@ def groq_llm_output(question: str, db_knn: dict, llm: str, stream: bool) -> tupl
|
|
41 |
'prompt': prompt,
|
42 |
'stream': stream
|
43 |
}
|
44 |
-
response = requests.post(URL_LLM + "/api/generate", json=obj)
|
45 |
response_json = json.loads(response.text)
|
46 |
return (response_json['choices'][0]['message']['content'], references)
|
47 |
|
48 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
49 |
def embedding_output(message: str) -> list:
|
50 |
"""
|
51 |
Returns embeddings for the given message
|
|
|
41 |
'prompt': prompt,
|
42 |
'stream': stream
|
43 |
}
|
44 |
+
response = requests.post(URL_LLM + "/api/groq/generate", json=obj)
|
45 |
response_json = json.loads(response.text)
|
46 |
return (response_json['choices'][0]['message']['content'], references)
|
47 |
|
48 |
|
49 |
+
def google_llm_output(question: str, db_knn: dict, llm: str, stream: bool) -> tuple[str, str]:
    """
    Query the hosted Google Gemini proxy and return the answer text.

    Parameters:
        question: user question to answer.
        db_knn:   nearest-neighbour context chunks retrieved from the vector DB.
        llm:      Gemini model name to use.
        stream:   whether the upstream service should stream its response.

    Returns:
        (answer_text, references) — the extracted model answer and the
        references produced by prompt_generator for the supplied context.

    Raises:
        KeyError / IndexError if the upstream JSON does not have the
        expected Gemini ``candidates`` structure.
    """
    URL_LLM = 'https://robinroy03-fury-bot.hf.space'
    # _context is produced by prompt_generator but not needed here.
    prompt, _context, references = prompt_generator(question, db_knn)
    obj = {
        'model': llm,
        'prompt': prompt,
        'stream': stream
    }
    response = requests.post(URL_LLM + "/api/google/generate", json=obj)
    # response.json() replaces json.loads(response.text): same result, and it
    # lets requests honour the payload's declared character encoding.
    response_json = response.json()
    # Gemini nests the answer as candidates -> content -> parts -> text.
    return (response_json['candidates'][0]['content']['parts'][0]['text'], references)
|
60 |
+
|
61 |
+
|
62 |
def embedding_output(message: str) -> list:
|
63 |
"""
|
64 |
Returns embeddings for the given message
|