import os

# Install CPU-only torch/torchvision plus the matching detectron2 wheel at startup
# (detectron2 is required by LayoutLMv2's visual backbone).
os.system('pip install torch==1.8.0+cpu torchvision==0.9.0+cpu -f https://download.pytorch.org/whl/torch_stable.html')
os.system('pip install -q detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.8/index.html')
import gradio as gr
import numpy as np
import torch
from transformers import LayoutLMv2Processor, LayoutLMv2ForTokenClassification
from datasets import load_dataset
from PIL import Image, ImageDraw, ImageFont
# The processor runs OCR (Tesseract by default) to extract words and boxes,
# then tokenizes them and prepares the image for the model.
processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
model = LayoutLMv2ForTokenClassification.from_pretrained("Theivaprakasham/layoutlmv2-finetuned-sroie")
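# Optional sanity check (commented out so it does not print at startup): the
# fine-tuned checkpoint carries its own id-to-label mapping, which should agree
# with the tags derived from the dataset below.
# print(model.config.id2label)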
# Save a few receipts from the SROIE test split to use as Gradio examples.
dataset = load_dataset("darentang/sroie", split="test")
Image.open(dataset[50]["image_path"]).convert("RGB").save("example1.png")
Image.open(dataset[14]["image_path"]).convert("RGB").save("example2.png")
Image.open(dataset[20]["image_path"]).convert("RGB").save("example3.png")
# Map prediction ids to IOB tag names.
labels = dataset.features['ner_tags'].feature.names
id2label = {idx: label for idx, label in enumerate(labels)}
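# For illustration, id2label ends up looking something like
# {0: 'O', 1: 'B-COMPANY', 2: 'I-COMPANY', ...}; the exact order comes from
# the dataset's ner_tags feature.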
label2color = {'B-ADDRESS': 'blue',
               'B-COMPANY': 'green',
               'B-DATE': 'red',
               'B-TOTAL': 'red',
               'I-ADDRESS': 'blue',
               'I-COMPANY': 'green',
               'I-DATE': 'red',
               'I-TOTAL': 'red',
               'O': 'green'}
# Normalize keys to lowercase to match the lowercased predictions drawn later.
label2color = {k.lower(): v.lower() for k, v in label2color.items()}
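# Example lookup: a prediction such as 'B-COMPANY' is lowercased to
# 'b-company' at draw time, so label2color['b-company'] -> 'green'.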
def unnormalize_box(bbox, width, height):
    """Convert a 0-1000 normalized box back to pixel coordinates."""
    return [
        width * (bbox[0] / 1000),
        height * (bbox[1] / 1000),
        width * (bbox[2] / 1000),
        height * (bbox[3] / 1000),
    ]
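# Worked example: on a 1000x2000 px image,
# unnormalize_box([100, 200, 300, 400], 1000, 2000) -> [100.0, 400.0, 300.0, 800.0]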
def iob_to_label(label):
    # Intentionally a pass-through: label2color is keyed by the full IOB tags
    # (e.g. 'b-company'), so the prefix is kept rather than stripped.
    return label
def process_image(image):
    width, height = image.size

    # Encode: the processor OCRs the image, tokenizes the words, and returns
    # normalized (0-1000) bounding boxes per token.
    encoding = processor(image, truncation=True, return_offsets_mapping=True, return_tensors="pt")
    offset_mapping = encoding.pop('offset_mapping')

    # Forward pass (inference only, so no gradients needed).
    with torch.no_grad():
        outputs = model(**encoding)

    # Take the highest-scoring label per token.
    predictions = outputs.logits.argmax(-1).squeeze().tolist()
    token_boxes = encoding.bbox.squeeze().tolist()

    # Keep only the first subword of each word: a token whose character offset
    # does not start at 0 is a continuation subword and would duplicate the
    # word-level prediction.
    is_subword = np.array(offset_mapping.squeeze().tolist())[:, 0] != 0
    true_predictions = [id2label[pred] for idx, pred in enumerate(predictions) if not is_subword[idx]]
    true_boxes = [unnormalize_box(box, width, height) for idx, box in enumerate(token_boxes) if not is_subword[idx]]

    # Draw a colored box and the predicted label for each word on the image.
    draw = ImageDraw.Draw(image)
    font = ImageFont.load_default()
    for prediction, box in zip(true_predictions, true_boxes):
        predicted_label = iob_to_label(prediction).lower()
        draw.rectangle(box, outline=label2color[predicted_label])
        draw.text((box[0] + 10, box[1] - 10), text=predicted_label, fill=label2color[predicted_label], font=font)

    return image
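# Quick local smoke test (commented out so it does not run at startup; assumes
# example1.png was saved above):
# annotated = process_image(Image.open("example1.png").convert("RGB"))
# annotated.save("annotated_example1.png")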
title = "Bill Information Extraction using LayoutLMv2"
description = "Bill Information Extraction - This demo uses Microsoft's LayoutLMv2, fine-tuned on the SROIE dataset, to predict the company name, address, date, and total amount on bills and receipts. To use it, simply upload an image or click one of the examples below. Results show up in a few seconds."
article="References
[1] Y. Xu et al., “LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding.” 2022. Paper Link
[2] LayoutLMv2 training and inference"
examples =[['example1.png'],['example2.png'],['example3.png']]
css = """.output_image, .input_image {height: 600px !important}"""
iface = gr.Interface(fn=process_image,
                     inputs=gr.Image(type="pil"),
                     outputs=gr.Image(type="pil", label="annotated image"),
                     title=title,
                     description=description,
                     article=article,
                     examples=examples,
                     css=css,
                     analytics_enabled=True)
iface.queue().launch(inline=False, debug=False)