Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,6 +1,6 @@
|
|
1 |
from fastapi import FastAPI, HTTPException, Query
|
2 |
from fastapi.responses import JSONResponse, StreamingResponse
|
3 |
-
from webscout import WEBS, YTTranscriber, LLM
|
4 |
from typing import Optional, List, Dict
|
5 |
from fastapi.encoders import jsonable_encoder
|
6 |
from bs4 import BeautifulSoup
|
@@ -13,9 +13,9 @@ from huggingface_hub import InferenceClient
|
|
13 |
from PIL import Image
|
14 |
import io
|
15 |
from easygoogletranslate import EasyGoogleTranslate
|
16 |
-
|
17 |
from pydantic import BaseModel
|
18 |
|
|
|
19 |
app = FastAPI()
|
20 |
|
21 |
# Define Pydantic models for request payloads
|
@@ -427,7 +427,59 @@ async def adv_web_search(
|
|
427 |
|
428 |
except Exception as e:
|
429 |
raise HTTPException(status_code=500, detail=f"Error during advanced search: {e}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
430 |
|
|
|
|
|
431 |
|
432 |
@app.get("/api/website_summarizer")
|
433 |
async def website_summarizer(url: str, proxy: Optional[str] = None):
|
@@ -483,22 +535,7 @@ async def ask_website(url: str, question: str, model: str = "llama-3-70b", proxy
|
|
483 |
except Exception as e:
|
484 |
raise HTTPException(status_code=500, detail=f"Error during question answering: {e}")
|
485 |
|
486 |
-
# Stable Diffusion client
client_sd3 = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")

@app.get("/api/sd3")
def sd3(prompt: str = "", steps: int = 20, width: int = 1000, height: int = 1000):
    """
    Generate an image with Stable Diffusion 3 and return it as a PNG stream.

    Args:
        prompt: User prompt; quality boosters are appended automatically.
        steps: Number of diffusion inference steps.
        width: Output width in pixels.
        height: Output height in pixels.

    Raises:
        HTTPException: 500 on any failure during image generation.
    """
    try:
        # InferenceClient.text_to_image already returns a PIL.Image.Image.
        image = client_sd3.text_to_image(
            prompt=f"{prompt}, hd, high quality, 4k, masterpiece",
            num_inference_steps=steps,
            width=width,
            height=height
        )
        # BUG FIX: the original called Image.open(io.BytesIO(image)) on the
        # PIL object (TypeError: io.BytesIO expects bytes) and then returned
        # a raw PIL image, which FastAPI cannot serialize. Encode to PNG and
        # stream it back instead (StreamingResponse is imported at file top).
        buf = io.BytesIO()
        image.save(buf, format="PNG")
        buf.seek(0)
        return StreamingResponse(buf, media_type="image/png")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error during image generation: {e}")
|
502 |
|
503 |
@app.get("/api/translate")
|
504 |
async def translate(
|
@@ -562,4 +599,4 @@ def get_ascii_weather(location: str):
|
|
562 |
|
563 |
if __name__ == "__main__":
|
564 |
import uvicorn
|
565 |
-
uvicorn.run(app, host="0.0.0.0", port=8083)
|
|
|
1 |
from fastapi import FastAPI, HTTPException, Query
|
2 |
from fastapi.responses import JSONResponse, StreamingResponse
|
3 |
+
from webscout import WEBS, YTTranscriber, LLM, GoogleS
|
4 |
from typing import Optional, List, Dict
|
5 |
from fastapi.encoders import jsonable_encoder
|
6 |
from bs4 import BeautifulSoup
|
|
|
13 |
from PIL import Image
|
14 |
import io
|
15 |
from easygoogletranslate import EasyGoogleTranslate
|
|
|
16 |
from pydantic import BaseModel
|
17 |
|
18 |
+
|
19 |
app = FastAPI()
|
20 |
|
21 |
# Define Pydantic models for request payloads
|
|
|
427 |
|
428 |
except Exception as e:
|
429 |
raise HTTPException(status_code=500, detail=f"Error during advanced search: {e}")
|
430 |
+
@app.post("/api/AI_search_google")
# NOTE(review): this handler name shadows the earlier `adv_web_search` GET
# endpoint's function at module level; FastAPI routes are unaffected, but
# consider renaming for clarity — confirm nothing imports the symbol.
async def adv_web_search(
    q: str,
    model: str = "gpt-4o-mini",  # Use webs.chat by default
    max_results: int = 5,
    timelimit: Optional[str] = None,
    safesearch: str = "moderate",
    region: str = "wt-wt",
    # backend: str = "html",
    max_chars: int = 15000,
    system_prompt: str = "You are an advanced AI chatbot. Provide the best answer to the user based on Google search results.",
    proxy: Optional[str] = None
):
    """
    Combines web search, web extraction, and chat model for advanced search.

    Searches Google (GoogleS), concurrently extracts text from the result
    URLs, assembles a prompt from the extracted content (capped at
    `max_chars`), and asks the chat model via WEBS.chat for an answer.

    NOTE(review): `system_prompt` is accepted but never used in the body —
    presumably it should be passed to the chat call; confirm with webs.chat.

    Raises:
        HTTPException: 500 on any failure during search, extraction, or chat.
    """
    try:
        # 1. Run the Google search.
        with GoogleS(proxy=proxy) as webs:
            search_results = webs.search(query=q, region=region,
                                         safe=safesearch,
                                         time_period=timelimit,
                                         max_results=max_results)

        # 2. Extract text from top search result URLs asynchronously.
        extracted_text = ""
        tasks = [fetch_and_extract(result['href'], 6000, proxy) for result in search_results if 'href' in result]
        extracted_results = await asyncio.gather(*tasks)
        for result in extracted_results:
            if result['text'] and len(extracted_text) < max_chars:
                extracted_text += f"## Content from: {result['link']}\n\n{result['text']}\n\n"

        # BUG FIX: the original evaluated `extracted_text[:max_chars]` without
        # assigning the result, so the cap was silently never applied.
        extracted_text = extracted_text[:max_chars]

        # 3. Construct the prompt for the chat model.
        ai_prompt = (
            f"User Query: {q}\n\n"
            f"Please provide a detailed and accurate answer to the user's query. Include relevant information extracted from the search results below. Ensure to cite sources by providing links to the original content where applicable. Format your response as follows:\n\n"
            f"1. **Answer:** Provide a clear and comprehensive answer to the user's query.\n"
            f"2. **Details:** Include any additional relevant details or explanations.\n"
            f"3. **Summary:** Provide a summary of the search results. **"
            f"4. **Sources:** List the sources of the information with clickable links for further reading.\n\n"
            f"Search Results:\n{extracted_text}"
        )

        # 4. Get the chat model's response using webs.chat.
        with WEBS(proxy=proxy) as webs:
            response = webs.chat(keywords=ai_prompt, model=model)

        # 5. Return the results.
        return JSONResponse(content={"answer": response})

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error during advanced search: {e}")
|
483 |
|
484 |
@app.get("/api/website_summarizer")
|
485 |
async def website_summarizer(url: str, proxy: Optional[str] = None):
|
|
|
535 |
except Exception as e:
|
536 |
raise HTTPException(status_code=500, detail=f"Error during question answering: {e}")
|
537 |
|
|
|
|
|
538 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
539 |
|
540 |
@app.get("/api/translate")
|
541 |
async def translate(
|
|
|
599 |
|
600 |
if __name__ == "__main__":
    # Launch the ASGI server only when this module is executed directly.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8083)
|