# Green Greta — Gradio app (Hugging Face Space)
# (Cleaned up page-scrape residue that preceded the code: Space runtime
# status, file size, commit-hash list, and a line-number strip.)
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import torch
import theme
import chatbot
# Instantiate the app-wide Gradio theme.
# NOTE(review): this rebinds the name `theme` from the imported module to a
# Theme instance (module-name shadowing) — later `theme=theme` kwargs refer
# to the instance, which is what Gradio expects, but the shadowing is fragile.
theme = theme.Theme()
# Cell 1: Image Classification Model
# Image-classification pipeline; the model id suggests a ViT-based waste
# classifier — confirm against the model card on the Hub.
image_pipeline = pipeline(task="image-classification", model="guillen/vit-basura-test1")
def predict_image(input_img):
    """Classify an uploaded image with the waste-classification pipeline.

    Returns a label -> confidence-score mapping in the shape ``gr.Label``
    expects.
    """
    scores = {}
    for prediction in image_pipeline(input_img):
        scores[prediction["label"]] = prediction["score"]
    return scores
# Gradio UI for the image-classification model.
image_input = gr.Image(label="Image", sources=['upload', 'webcam'], type="pil")
image_gradio_app = gr.Interface(
    predict_image,
    inputs=image_input,
    outputs=[gr.Label(label="Result")],
    title="Green Greta",
    theme=theme,
)
# Cell 2: Chatbot Model
def qa_response(user_message, chat_history, context=None):
    """Answer a chat message via the QA chain.

    Parameters
    ----------
    user_message : str
        The latest message from the user.
    chat_history : list
        The conversation history supplied by ``gr.ChatInterface``.
    context : optional
        Extra retrieval context. Defaults to None because
        ``gr.ChatInterface`` invokes ``fn(message, history)`` with exactly
        two positional arguments — without a default this parameter made
        every chat turn raise TypeError.

    NOTE(review): ``qa_chain`` is not defined anywhere in this file, while
    the imported ``chatbot`` module is never used — presumably the chain
    should come from ``chatbot`` (e.g. ``chatbot.qa_chain``); verify.
    """
    response = qa_chain.predict(user_message, chat_history, context=context)
    return response
# Chat UI wired to the QA response function.
chatbot_gradio_app = gr.ChatInterface(
    qa_response,
    title="Green Greta",
    theme=theme,
)
# Combine both interfaces into a single tabbed app and start the server.
# Fix: removed a stray trailing "|" after .launch() that made the module
# fail to parse (SyntaxError on import — a likely cause of the Space's
# reported runtime error).
app = gr.TabbedInterface(
    [image_gradio_app, chatbot_gradio_app],
    tab_names=["Green Greta Image Classification", "Green Greta Chat"],
    theme=theme,
)
app.launch()