import gradio as gr
import requests
import os
import json
import google.generativeai as genai
# Read API keys from the environment
genai.configure(api_key=os.environ["geminiapikey"])
read_key = os.environ.get('HF_TOKEN', None)
custom_css = """
#md {
    height: 400px;
    font-size: 30px;
    background: #202020;
    padding: 20px;
    color: white;
    border: 1px solid white;
}
"""
def predict(prompt):
    """Send the prompt to a Gemini chat session and return the generated text."""
    # Configure the generation parameters
    generation_config = {
        "temperature": 0.3,
        "top_p": 0.95,
        "top_k": 40,
        "max_output_tokens": 2048,
        "response_mime_type": "text/plain",
    }
    # Create the model
    model = genai.GenerativeModel(
        #model_name="gemini-1.5-pro",
        model_name="gemini-2.0-flash-exp",
        generation_config=generation_config,
    )
    # Start a fresh chat session with no prior history
    chat_session = model.start_chat(history=[])
    response = chat_session.send_message(prompt)
    #response = model.generate_content(contents=prompt, tools='google_search_retrieval')
    return response.text
# Create the Gradio interface
with gr.Blocks(css=custom_css) as demo:
    with gr.Row():
        details_output = gr.Markdown(label="answer", elem_id="md")
        #details_output = gr.Textbox(label="Output", value = f"\n\n\n\n")
    with gr.Row():
        ort_input = gr.Textbox(label="prompt", placeholder="ask anything...")
    with gr.Row():
        button = gr.Button("Send")

    # Connect the button to the function
    button.click(fn=predict, inputs=ort_input, outputs=details_output)

# Launch the Gradio application
demo.launch()