import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration, pipeline
import sentencepiece  # not used directly; required by the Marian translation tokenizer
import gradio as gr

# Load the BLIP image-captioning processor and model.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
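
# Optional (a sketch, assuming a CUDA-enabled torch build): run captioning on GPU.
# The encodings returned by the processor would need .to(device) as well.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device)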

# English -> Arabic translation pipeline (Helsinki-NLP OPUS-MT).
model_translator = pipeline("translation", model="Helsinki-NLP/opus-mt-tc-big-en-ar")


def image_captioning(image, prefix="a "):
    """Return a caption (as str) describing an image."""
    # Condition generation on the prefix so captions start with "a ...".
    inputs = processor(image, prefix, return_tensors="pt")
    # max_length caps generation length; it belongs to generate(), not decode().
    output = model.generate(**inputs, max_length=80)
    caption = processor.decode(output[0], skip_special_tokens=True)
    return caption


def translate_text(text):
    """Return the English text translated to Arabic (the pipeline is en->ar only)."""
    translated = model_translator(str(text))
    return translated[0]["translation_text"]


def image_captioning_ar(image, prefix="a "):
    """Return the English caption and its Arabic translation."""
    if image:
        text = image_captioning(image, prefix=prefix)
        return text, translate_text(text)
    # No image uploaded: return empty values for both output textboxes.
    return None, None
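
# A minimal sketch of direct use without the UI ("example.jpg" is a hypothetical path):
# img = Image.open("example.jpg")
# print(image_captioning_ar(img))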

# Gradio UI: one image input, two text outputs (English and Arabic captions).
input_image = gr.Image(type="pil", label="Upload your image")
imageCaptioning_interface = gr.Interface(
    fn=image_captioning_ar,
    inputs=input_image,
    outputs=[gr.Textbox(label="Caption (en)"), gr.Textbox(label="Caption (ar)")],
    title="Image captioning",
)
imageCaptioning_interface.launch()
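
# launch() serves the app locally; passing share=True would also create a
# temporary public URL.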