 
# Import packages
import torch
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration, AutoTokenizer, pipeline
import sentencepiece  # SentencePiece must be installed for the Marian translation tokenizer
import gradio as gr

# Image captioning model
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

# Translate en to ar
model_translater = pipeline("translation", model="Helsinki-NLP/opus-mt-tc-big-en-ar")

# Conditional image captioning (the caption is generated starting from a text prefix)
def image_captioning(image, prefix="a "):
  """ Return a text description (str) of an image """
  # Preprocess the image and the text prefix
  inputs = processor(image, prefix, return_tensors="pt")

  # Generate token ids describing the image
  output = model.generate(**inputs, max_length=80)

  # Decode the token ids into a string
  output = processor.decode(output[0], skip_special_tokens=True)
  return output

def translate_text(text):
  """ Return the Arabic translation of an English text """
  translated_text = model_translater(str(text))
  return translated_text[0]['translation_text']

def image_captioning_ar(image, prefix="a "):
  """ Return the English caption and its Arabic translation for an image """
  if image:
    text = image_captioning(image, prefix=prefix)
    return text, translate_text(text)
  return None, None

# Build the Gradio interface: one image input, two text outputs (English and Arabic captions)
input_image = gr.Image(type="pil", label='Upload your image')
imageCaptioning_interface = gr.Interface(
    fn=image_captioning_ar,
    inputs=input_image,
    outputs=[gr.Textbox(label="Caption (en)"), gr.Textbox(label="Caption (ar)")],
    title='Image captioning',
)
imageCaptioning_interface.launch()
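# Note: launch() serves the app locally; launch(share=True) would also create a temporary public link.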