Update app.py
app.py CHANGED
```diff
@@ -6,9 +6,6 @@ from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
 import os
 from dotenv import load_dotenv
-import gradio as gr
-import requests
-import asyncio
 from pydantic import BaseModel
 
 load_dotenv()
@@ -50,7 +47,7 @@ def generate_model_response(model, inputs):
     try:
         if model is None:
             return "Model loading failed."
-        response = model(inputs, max_tokens=512)
+        response = model(inputs, max_tokens=512)
         return remove_duplicates(response['choices'][0]['text'])
     except Exception as e:
         print(f"Error generating response: {e}")
@@ -65,7 +62,7 @@ app.add_middleware(
 @app.post("/generate")
 async def generate(request: ChatRequest):
     inputs = normalize_input(request.message)
-    chunk_size = 400
+    chunk_size = 400
     chunks = [inputs[i:i + chunk_size] for i in range(0, len(inputs), chunk_size)]
     overall_response = ""
     for chunk in chunks:
@@ -76,24 +73,6 @@ async def generate(request: ChatRequest):
         overall_response += f"**{response['model']}:**\n{response['response']}\n\n"
     return {"response": overall_response}
 
-async def process_message(message, history):
-    try:
-        port = os.environ.get("PORT", 7860)
-        response = requests.post(f"http://localhost:{port}/generate", json={"message": message}).json()
-        formatted_response = response["response"]
-        history.append((message, formatted_response))
-        return history, history
-    except requests.exceptions.RequestException as e:
-        return history, f"Error: {e}"
-
-iface = gr.Interface(
-    fn=process_message,
-    inputs=[gr.Textbox(lines=2, placeholder="Enter your message here..."), gr.State([])],
-    outputs=[gr.Chatbot(), gr.State([])],
-    title="Multi-Model LLM API", description="Enter a message and get responses from multiple LLMs."
-)
-
 if __name__ == "__main__":
     port = int(os.environ.get("PORT", 7860))
-    uvicorn.run(app, host="0.0.0.0", port=port)
-    iface.launch(server_port=7860)
+    uvicorn.run(app, host="0.0.0.0", port=port)
```
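The deleted block points at the likely cause of the runtime error: `uvicorn.run()` blocks the main thread, so the `iface.launch()` call after it was never reached, and `process_message` called back into the same process over HTTP. If the Gradio front end is still wanted, one option is to run it as a separate process that talks to the API over the network. Below is a minimal sketch reconstructed from the deleted code; the `API_URL` variable, the timeout, and the choice of port 7861 are assumptions, not part of this commit.

```python
import os

import gradio as gr
import requests

# Assumed environment variable; point it at wherever the FastAPI app
# is reachable (the Space serves on $PORT, defaulting to 7860).
API_URL = os.environ.get("API_URL", "http://localhost:7860")


def process_message(message, history):
    """Forward the message to the FastAPI /generate endpoint over HTTP."""
    try:
        resp = requests.post(
            f"{API_URL}/generate", json={"message": message}, timeout=120
        )
        resp.raise_for_status()
        history.append((message, resp.json()["response"]))
        return history, history
    except requests.exceptions.RequestException as e:
        return history, f"Error: {e}"


iface = gr.Interface(
    fn=process_message,
    inputs=[gr.Textbox(lines=2, placeholder="Enter your message here..."), gr.State([])],
    outputs=[gr.Chatbot(), gr.State([])],
    title="Multi-Model LLM API",
    description="Enter a message and get responses from multiple LLMs.",
)

if __name__ == "__main__":
    iface.launch(server_port=7861)  # any port other than the API's
```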
6 |
from fastapi.middleware.cors import CORSMiddleware
|
7 |
import os
|
8 |
from dotenv import load_dotenv
|
|
|
|
|
|
|
9 |
from pydantic import BaseModel
|
10 |
|
11 |
load_dotenv()
|
|
|
47 |
try:
|
48 |
if model is None:
|
49 |
return "Model loading failed."
|
50 |
+
response = model(inputs, max_tokens=512)
|
51 |
return remove_duplicates(response['choices'][0]['text'])
|
52 |
except Exception as e:
|
53 |
print(f"Error generating response: {e}")
|
|
|
62 |
@app.post("/generate")
|
63 |
async def generate(request: ChatRequest):
|
64 |
inputs = normalize_input(request.message)
|
65 |
+
chunk_size = 400
|
66 |
chunks = [inputs[i:i + chunk_size] for i in range(0, len(inputs), chunk_size)]
|
67 |
overall_response = ""
|
68 |
for chunk in chunks:
|
|
|
73 |
overall_response += f"**{response['model']}:**\n{response['response']}\n\n"
|
74 |
return {"response": overall_response}
|
75 |
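Unrelated to the UI removal, the `/generate` endpoint keeps its fixed-width chunking: the input is sliced into 400-character pieces and each piece is run through the models independently. A quick illustration of what the comprehension from the diff produces (the 950-character input is just an example):

```python
inputs = "x" * 950
chunk_size = 400
chunks = [inputs[i:i + chunk_size] for i in range(0, len(inputs), chunk_size)]
print([len(c) for c in chunks])  # [400, 400, 150]
```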
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
76 |
if __name__ == "__main__":
|
77 |
port = int(os.environ.get("PORT", 7860))
|
78 |
+
uvicorn.run(app, host="0.0.0.0", port=port)
|
|
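Once the Space restarts with uvicorn as the only server, the endpoint can be smoke-tested from outside the process. A sketch assuming the default port of 7860 and the single `message` field that `ChatRequest` appears to carry in the diff:

```python
import requests

resp = requests.post(
    "http://localhost:7860/generate",
    json={"message": "Hello, which models are answering?"},
    timeout=60,
)
resp.raise_for_status()
# The endpoint concatenates one "**<model>:**" section per model per chunk.
print(resp.json()["response"])
```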