import streamlit as st
import anthropic
import openai
import base64
from datetime import datetime
import plotly.graph_objects as go
import cv2
import glob
import json
import math
import os
import pytz
import random
import re
import requests
import streamlit.components.v1 as components
import textract
import time
import zipfile
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from collections import deque
from dotenv import load_dotenv
from gradio_client import Client, handle_file
from huggingface_hub import InferenceClient
from io import BytesIO
from moviepy.editor import VideoFileClip
from PIL import Image
from PyPDF2 import PdfReader
from urllib.parse import quote
from xml.etree import ElementTree as ET
from openai import OpenAI
# Configuration and Setup
Site_Name = '🤖🧠 Combined AI App📚💬'
title = "🤖🧠 Combined AI App📚💬"
helpURL = 'https://huggingface.co/awacke1'
bugURL = 'https://huggingface.co/spaces/awacke1'
icons = '🤖🧠💬📚'
st.set_page_config(
    page_title=title,
    page_icon=icons,
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': helpURL,
        'Report a bug': bugURL,
        'About': title
    }
)
# Load environment variables and initialize clients
load_dotenv()
# OpenAI setup
openai.api_key = os.getenv('OPENAI_API_KEY')
if openai.api_key is None:
    openai.api_key = st.secrets['OPENAI_API_KEY']
openai_client = OpenAI(
    api_key=openai.api_key,  # uses the st.secrets fallback when the env var is unset
    organization=os.getenv('OPENAI_ORG_ID')
)
# Claude setup
anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
if anthropic_key is None:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
claude_client = anthropic.Anthropic(api_key=anthropic_key)
# Initialize session states
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
if "openai_model" not in st.session_state:
    # A vision-capable model is assumed here, since the image/video helpers
    # below send image_url content parts.
    st.session_state["openai_model"] = "gpt-4o"
if "messages" not in st.session_state:
    st.session_state.messages = []
if "search_queries" not in st.session_state:
    st.session_state.search_queries = []
if 'selected_file' not in st.session_state:
    st.session_state.selected_file = None
if 'view_mode' not in st.session_state:
    st.session_state.view_mode = 'view'
if 'files' not in st.session_state:
    st.session_state.files = []
# Custom CSS
st.markdown("""
""", unsafe_allow_html=True)
# Bike Collections
bike_collections = {
    "Celestial Collection 🌌": {
        "Eclipse Vaulter": {
            "prompt": """Cinematic shot of a sleek black mountain bike silhouetted against a total solar eclipse.
The corona creates an ethereal halo effect, with lens flares accentuating key points of the frame.
Dynamic composition shows the bike mid-leap, with stardust particles trailing behind.
Camera angle: Low angle, wide shot
Lighting: Dramatic rim lighting from eclipse
Color palette: Deep purples, cosmic blues, corona gold""",
            "emoji": "🌑"
        },
        "Starlight Leaper": {
            "prompt": """A black bike performing an epic leap under a vast Milky Way galaxy.
Shimmering stars blanket the sky while the bike's wheels leave a trail of stardust.
Camera angle: Wide-angle upward shot
Lighting: Natural starlight with subtle rim lighting
Color palette: Deep blues, silver highlights, cosmic purples""",
            "emoji": "✨"
        },
        "Moonlit Hopper": {
            "prompt": """A sleek black bike mid-hop over a moonlit meadow,
the full moon illuminating the misty surroundings. Fireflies dance around the bike,
and soft shadows create a serene yet dynamic atmosphere.
Camera angle: Side profile with slight low angle
Lighting: Soft moonlight with atmospheric fog
Color palette: Silver blues, soft whites, deep shadows""",
            "emoji": "🌙"
        }
    },
    "Nature-Inspired Collection 🌲": {
        "Shadow Grasshopper": {
            "prompt": """A black bike jumping between forest paths,
with dappled sunlight streaming through the canopy. Shadows dance on the bike's frame
as it soars above mossy logs.
Camera angle: Through-the-trees tracking shot
Lighting: Natural forest lighting with sun rays
Color palette: Forest greens, golden sunlight, deep shadows""",
            "emoji": "🦗"
        },
        "Onyx Leapfrog": {
            "prompt": """A bike with obsidian-black finish jumping over a sparkling creek,
the reflection on the water broken into ripples by the leap. The surrounding forest
is vibrant with greens and browns.
Camera angle: Low angle from water level
Lighting: Golden hour side lighting
Color palette: Deep blacks, water blues, forest greens""",
            "emoji": "🐸"
        }
    }
}
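# Example (illustrative): look up a single scene prompt from the collections above.
# prompt = bike_collections["Celestial Collection 🌌"]["Eclipse Vaulter"]["prompt"]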
# File Operations Functions
def create_file(filename, prompt, response, is_image=False, should_save=True):
    """Basic file creation with prompt and response."""
    if not should_save:
        return None
    with open(filename, "w", encoding="utf-8") as f:
        f.write(prompt + "\n\n" + response)
def generate_filename(prompt, file_type):
    """Generate a safe filename using the prompt and file type."""
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
    safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:240]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"
def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
    """Create and save file with proper handling of different types."""
    if not should_save:
        return None
    filename = generate_filename(prompt if prompt else content, file_type)
    if is_image:
        # Image/audio payloads arrive as raw bytes, so write in binary mode.
        with open(filename, "wb") as f:
            f.write(content)
    else:
        with open(filename, "w", encoding="utf-8") as f:
            f.write(prompt + "\n\n" + content if prompt else content)
    return filename
def get_download_link(file_path):
    """Create an HTML download link for a file."""
    with open(file_path, "rb") as file:
        contents = file.read()
    b64 = base64.b64encode(contents).decode()
    return (f'<a href="data:file/txt;base64,{b64}" '
            f'download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}📂</a>')
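# Callers render the anchor via st.markdown(get_download_link(path), unsafe_allow_html=True),
# which turns the data-URI <a> tag into a clickable download link.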
def load_file(file_name):
    """Load file content."""
    with open(file_name, "r", encoding='utf-8') as file:
        content = file.read()
    return content
def create_zip_of_files(files):
    """Create zip archive of files."""
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as zipf:
        for file in files:
            zipf.write(file)
    return zip_name
def get_media_html(media_path, media_type="video", width="100%"):
    """Generate HTML for a base64-embedded media player."""
    media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
    if media_type == "video":
        return f'''
        <video width="{width}" controls>
            <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
            Your browser does not support the video tag.
        </video>
        '''
    else:  # audio
        return f'''
        <audio controls style="width: {width};">
            <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
            Your browser does not support the audio element.
        </audio>
        '''
# Speech Synthesis
def SpeechSynthesis(result):
    """Render an HTML5 document that reads the text aloud via the browser's Web Speech API."""
    documentHTML5 = f'''
    <!DOCTYPE html>
    <html>
    <head>
        <title>Read It Aloud</title>
        <script type="text/javascript">
            function readAloud() {{
                const text = document.getElementById("textArea").value;
                const speech = new SpeechSynthesisUtterance(text);
                window.speechSynthesis.speak(speech);
            }}
        </script>
    </head>
    <body>
        <h1>🔊 Read It Aloud</h1>
        <textarea id="textArea" rows="10" cols="80">{result}</textarea>
        <br>
        <button onclick="readAloud()">🔊 Read Aloud</button>
    </body>
    </html>
    '''
    components.html(documentHTML5, width=1280, height=300)
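# Example: SpeechSynthesis("Hello from the app") renders the widget; the browser's
# speechSynthesis engine does the actual text-to-speech when the button is clicked.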
# ArXiv Search Functions (combined into one function)
def search_arxiv(query, should_save=True):
    """Search ArXiv papers using the Hugging Face Gradio client."""
    st.write("Performing AI Lookup...")
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    start_time = time.strftime("%Y-%m-%d %H:%M:%S")
    # First query - get papers
    response1 = client.predict(
        query,
        10,
        "Semantic Search",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        api_name="/update_with_rag_md"
    )
    # Second query - get summary
    response2 = client.predict(
        query,
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        True,
        api_name="/ask_llm"
    )
    Question = '### 🔎 ' + query + '\r\n'
    References = response1[0]
    ReferenceLinks = extract_urls(References)
    results = Question + '\r\n' + response2 + '\r\n' + References + '\r\n' + ReferenceLinks
    st.markdown(results)
    SpeechSynthesis(results)
    end_time = time.strftime("%Y-%m-%d %H:%M:%S")
    start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
    end_timestamp = time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S"))
    elapsed_seconds = end_timestamp - start_timestamp
    st.write(f"Start time: {start_time}")
    st.write(f"Finish time: {end_time}")
    st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
    filename = generate_filename(query, "md")
    create_file(filename, query, results, should_save=should_save)
    return results
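# Example (illustrative query): search_arxiv("mixture of experts")
# renders the results inline and saves a markdown transcript next to the app.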
def extract_urls(text):
    """Extract dates, titles, and links from ArXiv search results."""
    try:
        date_pattern = re.compile(r'### (\d{2} \w{3} \d{4})')
        abs_link_pattern = re.compile(r'\[(.*?)\]\((https://arxiv\.org/abs/\d+\.\d+)\)')
        pdf_link_pattern = re.compile(r'\[⬇️\]\((https://arxiv\.org/pdf/\d+\.\d+)\)')
        title_pattern = re.compile(r'### \d{2} \w{3} \d{4} \| \[(.*?)\]')
        date_matches = date_pattern.findall(text)
        abs_link_matches = abs_link_pattern.findall(text)
        pdf_link_matches = pdf_link_pattern.findall(text)
        title_matches = title_pattern.findall(text)
        markdown_text = ""
        # Zip the per-paper matches so a missing field cannot raise an IndexError.
        for date, title, abs_match, pdf_link in zip(date_matches, title_matches,
                                                    abs_link_matches, pdf_link_matches):
            abs_link = abs_match[1]
            markdown_text += f"**Date:** {date}\n\n"
            markdown_text += f"**Title:** {title}\n\n"
            markdown_text += f"**Abstract Link:** [{abs_link}]({abs_link})\n\n"
            markdown_text += f"**PDF Link:** [{pdf_link}]({pdf_link})\n\n"
            markdown_text += "---\n\n"
        return markdown_text
    except Exception:
        st.write('Error extracting URLs')
        return ''
# Media Processing Functions
def process_image(image_input, user_prompt):
    """Process an image (path or raw bytes) with GPT-4 vision."""
    if isinstance(image_input, str):
        with open(image_input, "rb") as image_file:
            image_input = image_file.read()
    base64_image = base64.b64encode(image_input).decode("utf-8")
    # Uses the v1 OpenAI client constructed at startup.
    response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt},
                {"type": "image_url", "image_url": {
                    "url": f"data:image/png;base64,{base64_image}"
                }}
            ]}
        ],
        temperature=0.0,
    )
    return response.choices[0].message.content
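# Example (illustrative path): process_image("photo.png", "Describe this image.")
# also accepts raw image bytes instead of a path.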
def process_audio(audio_input, text_input=''):
    """Transcribe audio with Whisper, display the result, and save it."""
    if isinstance(audio_input, str):
        with open(audio_input, "rb") as file:
            audio_input = file.read()
    # The transcription endpoint expects a named file object, so wrap raw bytes.
    audio_file = BytesIO(audio_input)
    audio_file.name = "audio.wav"
    transcription = openai_client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file,
    )
    st.session_state.messages.append({"role": "user", "content": transcription.text})
    with st.chat_message("assistant"):
        st.markdown(transcription.text)
        SpeechSynthesis(transcription.text)
    create_and_save_file(audio_input, "wav", transcription.text, True)
    return transcription.text
def save_and_play_audio(audio_recorder):
    """Save and play recorded audio."""
    audio_bytes = audio_recorder()
    if audio_bytes:
        filename = generate_filename("Recording", "wav")
        with open(filename, 'wb') as f:
            f.write(audio_bytes)
        st.audio(audio_bytes, format="audio/wav")
        return filename
    return None
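# Example: filename = save_and_play_audio(audio_recorder), where audio_recorder is the
# browser recording widget imported from audio_recorder_streamlit above.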
def process_video(video_path, seconds_per_frame=1):
    """Extract sampled frames (as base64 JPEGs) and the audio track from a video."""
    base64Frames = []
    video = cv2.VideoCapture(video_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)
    frames_to_skip = max(1, int(fps * seconds_per_frame))  # guard against fps == 0
    for frame_idx in range(0, total_frames, frames_to_skip):
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        success, frame = video.read()
        if not success:
            break
        _, buffer = cv2.imencode(".jpg", frame)
        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
    video.release()
    # Extract audio
    base_video_path = os.path.splitext(video_path)[0]
    audio_path = f"{base_video_path}.mp3"
    try:
        video_clip = VideoFileClip(video_path)
        video_clip.audio.write_audiofile(audio_path)
        video_clip.close()
    except Exception:
        st.warning("No audio track found in video")
        audio_path = None
    return base64Frames, audio_path
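# Example (illustrative path): frames, audio = process_video("clip.mp4", seconds_per_frame=2)
# returns base64-encoded JPEG frames plus the extracted mp3 path (or None if there is no audio).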
def process_video_with_gpt(video_input, user_prompt):
    """Process video frames with GPT-4 vision."""
    base64Frames, audio_path = process_video(video_input)
    response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "Analyze the video frames and provide a detailed description."},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt},
                *[{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
                  for frame in base64Frames]
            ]}
        ]
    )
    return response.choices[0].message.content
def create_media_gallery():
    """Create the media gallery interface."""
    st.header("🎬 Media Gallery")
    tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video", "🎨 Scene Generator"])
    with tabs[0]:  # Images
        image_files = glob.glob("*.png") + glob.glob("*.jpg")
        if image_files:
            num_cols = st.slider("Number of columns", 1, 5, 3)
            cols = st.columns(num_cols)
            for idx, image_file in enumerate(image_files):
                with cols[idx % num_cols]:
                    img = Image.open(image_file)
                    st.image(img, use_column_width=True)
                    if st.button(f"Analyze {os.path.basename(image_file)}"):
                        analysis = process_image(image_file,
                                                 "Describe this image in detail and identify key elements.")
                        st.markdown(analysis)
                        SpeechSynthesis(analysis)
    with tabs[1]:  # Audio
        audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
        for audio_file in audio_files:
            with st.expander(f"🎵 {os.path.basename(audio_file)}"):
                st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
                if st.button(f"Transcribe {os.path.basename(audio_file)}"):
                    # process_audio opens the path itself, so pass the filename.
                    transcription = process_audio(audio_file)
                    st.write(transcription)
                    SpeechSynthesis(transcription)
    with tabs[2]:  # Video
        video_files = glob.glob("*.mp4")
        for video_file in video_files:
            with st.expander(f"🎥 {os.path.basename(video_file)}"):
                st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
                if st.button(f"Analyze {os.path.basename(video_file)}"):
                    analysis = process_video_with_gpt(video_file,
                                                      "Describe what's happening in this video.")
                    st.markdown(analysis)
                    SpeechSynthesis(analysis)
    with tabs[3]:  # Scene Generator
        for collection_name, bikes in bike_collections.items():
            st.subheader(collection_name)
            cols = st.columns(len(bikes))
            for idx, (bike_name, details) in enumerate(bikes.items()):
                with cols[idx]:
                    st.markdown(f"""**{details['emoji']} {bike_name}**

{details['prompt']}""")
                    if st.button(f"Generate {bike_name} Scene"):
                        prompt = details['prompt']
                        st.write(f"Generated scene description for {bike_name}:")
                        st.write(prompt)
                        SpeechSynthesis(prompt)
# Chat Processing Functions
def process_with_gpt(text_input, should_save=True):
    """Process text with GPT-4."""
    if text_input:
        st.session_state.messages.append({"role": "user", "content": text_input})
        with st.chat_message("user"):
            st.markdown(text_input)
        with st.chat_message("assistant"):
            completion = openai_client.chat.completions.create(
                model=st.session_state["openai_model"],
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                stream=False
            )
            return_text = completion.choices[0].message.content
            st.write("GPT-4: " + return_text)
            filename = generate_filename(text_input, "md")
            create_file(filename, text_input, return_text, should_save=should_save)
            st.session_state.messages.append({"role": "assistant", "content": return_text})
        return return_text
def process_with_claude(text_input, should_save=True):
    """Process text with Claude."""
    if text_input:
        # The legacy completions endpoint requires the Human/Assistant turn markers.
        response = claude_client.completions.create(
            model="claude-2",
            max_tokens_to_sample=1000,
            prompt=f"{anthropic.HUMAN_PROMPT} {text_input}{anthropic.AI_PROMPT}"
        )
        response_text = response.completion
        st.write("Claude: " + response_text)
        filename = generate_filename(text_input, "md")
        create_file(filename, text_input, response_text, should_save=should_save)
        st.session_state.chat_history.append({
            "user": text_input,
            "claude": response_text
        })
        return response_text
def display_file_manager():
    """Display file management sidebar."""
    st.sidebar.title("📁 File Management")
    all_files = glob.glob("*.md")
    all_files.sort(reverse=True)
    if st.sidebar.button("🗑 Delete All"):
        for file in all_files:
            os.remove(file)
        st.rerun()
    if st.sidebar.button("⬇️ Download All"):
        zip_file = create_zip_of_files(all_files)
        st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
    for file in all_files:
        col1, col2, col3, col4 = st.sidebar.columns([1, 3, 1, 1])
        with col1:
            if st.button("📖", key="view_" + file):
                st.session_state.selected_file = file
                st.session_state.view_mode = 'view'
                st.session_state.file_content = load_file(file)
                SpeechSynthesis(st.session_state.file_content)
        with col2:
            st.markdown(get_download_link(file), unsafe_allow_html=True)
        with col3:
            if st.button("📝", key="edit_" + file):
                st.session_state.selected_file = file
                st.session_state.view_mode = 'edit'
                st.session_state.file_content = load_file(file)
        with col4:
            if st.button("🗑", key="delete_" + file):
                os.remove(file)
                st.rerun()
def display_file_content(file_path):
    """Display file content with editing capabilities."""
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
        if st.session_state.view_mode == 'view':
            st.markdown(content)
        else:
            edited_content = st.text_area(
                "Edit content",
                content,
                height=400,
                key=f"edit_{os.path.basename(file_path)}"
            )
            if st.button("Save Changes", key=f"save_{os.path.basename(file_path)}"):
                try:
                    with open(file_path, 'w', encoding='utf-8') as f:
                        f.write(edited_content)
                    st.success(f"Successfully saved changes to {file_path}")
                except Exception as e:
                    st.error(f"Error saving changes: {e}")
    except Exception as e:
        st.error(f"Error reading file: {e}")
def main():
    st.title("🚀 Combined AI Assistant App")
    # Main navigation with radio buttons
    ai_options = ["💬 Chat with GPT-4", "💬 Chat with Claude", "🔍 Search ArXiv",
                  "📸 Media Gallery", "📝 File Editor"]
    tab_main = st.radio("Choose Action:", ai_options, horizontal=True)
    if tab_main == "💬 Chat with GPT-4":
        user_input = st.text_area("Message:", height=100)
        if st.button("Send 📨"):
            if user_input:
                gpt_response = process_with_gpt(user_input)
                SpeechSynthesis(gpt_response)
        # Display chat history
        st.subheader("Chat History 📜")
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])
    elif tab_main == "💬 Chat with Claude":
        user_input = st.text_area("Message:", height=100)
        if st.button("Send 📨"):
            if user_input:
                claude_response = process_with_claude(user_input)
                SpeechSynthesis(claude_response)
        # Display chat history (unique keys keep Streamlit from flagging duplicate widgets)
        st.subheader("Chat History 📜")
        for idx, chat in enumerate(st.session_state.chat_history):
            st.text_area("You:", chat["user"], height=100, disabled=True, key=f"hist_user_{idx}")
            st.text_area("Claude:", chat["claude"], height=200, disabled=True, key=f"hist_claude_{idx}")
            st.markdown("---")
    elif tab_main == "🔍 Search ArXiv":
        # st.query_params is the current replacement for experimental_get_query_params.
        query = st.query_params.get('q', '')
        query = st.text_input("Enter your research query:", value=query)
        if query:
            with st.spinner("Searching ArXiv..."):
                results = search_arxiv(query)
            # Save the query and results
            filename = generate_filename(query, "md")
            create_file(filename, query, results)
            st.session_state.selected_file = filename
            st.session_state.file_content = results
            SpeechSynthesis(results)
    elif tab_main == "📸 Media Gallery":
        create_media_gallery()
    elif tab_main == "📝 File Editor":
        if st.session_state.selected_file:
            st.subheader(f"Editing: {st.session_state.selected_file}")
            display_file_content(st.session_state.selected_file)
        else:
            st.write("No file selected.")
    # Always show the file manager in the sidebar
    display_file_manager()

if __name__ == "__main__":
    main()