Alibrown committed
Commit 6613fa7 · verified · 1 parent: 14a41b1

Create app.py

Files changed (1)
app.py +169 -0
app.py ADDED
@@ -0,0 +1,169 @@
+ import streamlit as st
+ import google.generativeai as genai
+ from PIL import Image
+ import io
+ import base64
+ import pandas as pd
+ import zipfile
+ import PyPDF2
+
+ # Page configuration
+ st.set_page_config(page_title="Gemini AI Chat", layout="wide")
+
+ st.title("🤖 Gemini AI Chat Interface")
+ st.markdown("""
+ **Welcome to the Gemini AI Chat Interface!**
+ Chat seamlessly with Google's advanced Gemini AI models, supporting multiple input types.
+ 🔗 [GitHub Profile](https://github.com/volkansah) |
+ 📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat) |
+ 💬 [Soon](https://aicodecraft.io)
+ """)
+
+ # Session state management
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+ if "uploaded_content" not in st.session_state:
+     st.session_state.uploaded_content = None
+
+ # File processing helpers
+ def encode_image(image):
+     buffered = io.BytesIO()
+     image.save(buffered, format="JPEG")
+     return base64.b64encode(buffered.getvalue()).decode('utf-8')
+
+ def process_file(uploaded_file):
+     file_type = uploaded_file.name.split('.')[-1].lower()
+
+     if file_type in ["jpg", "jpeg", "png"]:
+         return {"type": "image", "content": Image.open(uploaded_file).convert('RGB')}
+
+     code_extensions = ["html", "css", "php", "js", "py", "java", "c", "cpp"]
+     if file_type in ["txt"] + code_extensions:
+         return {"type": "text", "content": uploaded_file.read().decode("utf-8")}
+
+     if file_type in ["csv", "xlsx"]:
+         df = pd.read_csv(uploaded_file) if file_type == "csv" else pd.read_excel(uploaded_file)
+         return {"type": "text", "content": df.to_string()}
+
+     if file_type == "pdf":
+         reader = PyPDF2.PdfReader(uploaded_file)
+         return {"type": "text", "content": "".join(page.extract_text() for page in reader.pages if page.extract_text())}
+
+     if file_type == "zip":
+         with zipfile.ZipFile(uploaded_file) as z:
+             newline = "\n"
+             return {"type": "text", "content": f"ZIP Contents:{newline}{newline.join(z.namelist())}"}
+
+     return {"type": "error", "content": "Unsupported file format"}
+
+ # Sidebar for settings
+ with st.sidebar:
+     api_key = st.text_input("Google AI API Key", type="password")
+     model = st.selectbox("Model", [
+         "gemini-1.5-flash",
+         "gemini-1.5-pro",
+         "gemini-1.5-pro-vision-latest",  # vision model for images
+         "gemini-1.0-pro",
+         "gemini-1.0-pro-vision-latest",  # vision model for images
+         "gemini-2.0-pro-exp-02-05",
+         "gemini-2.0-flash-lite",
+         "gemini-2.0-flash-exp-image-generation",  # vision model for images
+         "gemini-2.0-flash",
+         "gemini-2.0-flash-thinking-exp-01-21"
+     ])
+     temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
+     max_tokens = st.slider("Max Tokens", 1, 2048, 1000)
+
+     # File upload
+     uploaded_file = st.file_uploader("Upload File (Image/Text/PDF/ZIP)",
+                                      type=["jpg", "jpeg", "png", "txt", "pdf", "zip",
+                                            "csv", "xlsx", "html", "css", "php", "js", "py"])
+
+     if uploaded_file:
+         processed = process_file(uploaded_file)
+         st.session_state.uploaded_content = processed
+
+         if processed["type"] == "image":
+             st.image(processed["content"], caption="Uploaded Image", use_container_width=True)
+         elif processed["type"] == "text":
+             st.text_area("File Preview", processed["content"], height=200)
+
+ # Display the chat history
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ # Handle chat input
+ if prompt := st.chat_input("Your message..."):
+     if not api_key:
+         st.warning("API key required!")
+         st.stop()
+
+     try:
+         # Configure the API
+         genai.configure(api_key=api_key)
+
+         # Select the model
+         model_instance = genai.GenerativeModel(model)
+
+         # Prepare the content
+         content = [{"text": prompt}]
+
+         # Attach uploaded file content
+         if st.session_state.uploaded_content:
+             if st.session_state.uploaded_content["type"] == "image":
+                 if "vision" not in model.lower():
+                     st.error("Please select a vision model for images!")
+                     st.stop()
+                 content.append({
+                     "inline_data": {
+                         "mime_type": "image/jpeg",
+                         "data": encode_image(st.session_state.uploaded_content["content"])
+                     }
+                 })
+             elif st.session_state.uploaded_content["type"] == "text":
+                 content[0]["text"] += f"\n\n[File Content]\n{st.session_state.uploaded_content['content']}"
+
+         # Add the user message to the history
+         st.session_state.messages.append({"role": "user", "content": prompt})
+         with st.chat_message("user"):
+             st.markdown(prompt)
+
+         # Generate the response
+         response = model_instance.generate_content(
+             content,
+             generation_config=genai.types.GenerationConfig(
+                 temperature=temperature,
+                 max_output_tokens=max_tokens
+             )
+         )
+
+         # Check whether the response is valid
+         if not response.candidates:
+             st.error("API Error: No valid response received. Check the input or the model.")
+         else:
+             # Display the response
+             with st.chat_message("assistant"):
+                 st.markdown(response.text)
+             st.session_state.messages.append({"role": "assistant", "content": response.text})
+
+     except Exception as e:
+         st.error(f"API Error: {str(e)}")
+         if st.session_state.uploaded_content and st.session_state.uploaded_content["type"] == "image" and "vision" not in model.lower():
+             st.error("Select a vision-capable model for images!")
+
+ # Instructions in the sidebar
+ with st.sidebar:
+     st.markdown("""
+ ## 📝 Instructions:
+ 1. Enter your Google AI API key
+ 2. Select a model (use vision models for image analysis)
+ 3. Adjust temperature and max tokens if needed
+ 4. Optional: upload a file (image, text, PDF, CSV/XLSX, or ZIP)
+ 5. For image analysis, make sure a vision model is selected
+ 6. Type your message and press Enter
+ ### About
+ 🔗 [GitHub Profile](https://github.com/volkansah) |
+ 📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat) |
+ 💬 [Soon](https://aicodecraft.io)
+ """)