Yiwahpsp committed on
Commit 6c4a874
1 Parent(s): e64a807

Start app v.1

Files changed (2)
  1. app.py +106 -0
  2. requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,106 @@
+ import io
+ import pandas as pd
+ import plotly_express as px
+ import streamlit as st
+ import torch
+ import torch.nn.functional as F
+ import numpy as np
+ from easyocr import Reader
+ from PIL import Image
+ from transformers import (
+     LayoutLMv3ImageProcessor,
+     LayoutLMv3ForSequenceClassification,
+     LayoutLMv3Processor,
+     LayoutLMv3TokenizerFast,
+ )
+ 
+ 
+ DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
+ MICROSOFT_MODEL_NAME = "microsoft/layoutlmv3-base"
+ MODEL_NAME = "curiousily/layoutlmv3-financial-document-classification"
+ 
+ 
+ def create_bounding_box(bbox_data, width_scale: float, height_scale: float):
+     # Collapse an EasyOCR polygon into a [left, top, right, bottom] box,
+     # scaled to the 0-1000 coordinate range that LayoutLMv3 expects.
+     xs = []
+     ys = []
+     for x, y in bbox_data:
+         xs.append(x)
+         ys.append(y)
+ 
+     left = int(min(xs) * width_scale)
+     top = int(min(ys) * height_scale)
+     right = int(max(xs) * width_scale)
+     bottom = int(max(ys) * height_scale)
+ 
+     return [left, top, right, bottom]
+ 
+ 
+ @st.cache_resource
+ def create_ocr_reader():
+     # Cache the EasyOCR reader so it is only loaded once per session.
+     return Reader(["en"])
+ 
+ 
+ @st.cache_resource
+ def create_processor():
+     # OCR is done separately with EasyOCR, so disable the processor's built-in OCR.
+     feature_extractor = LayoutLMv3ImageProcessor(apply_ocr=False)
+     tokenizer = LayoutLMv3TokenizerFast.from_pretrained(MICROSOFT_MODEL_NAME)
+     return LayoutLMv3Processor(feature_extractor, tokenizer)
+ 
+ 
+ @st.cache_resource
+ def create_model():
+     model = LayoutLMv3ForSequenceClassification.from_pretrained(MODEL_NAME)
+     return model.eval().to(DEVICE)
+ 
+ 
+ def predict(image: Image.Image, reader: Reader, processor: LayoutLMv3Processor, model: LayoutLMv3ForSequenceClassification):
+     # EasyOCR accepts a file path, bytes, or NumPy array, so convert the PIL image.
+     ocr_result = reader.readtext(np.array(image))
+ 
+     width, height = image.size
+     width_scale = 1000 / width
+     height_scale = 1000 / height
+ 
+     words = []
+     boxes = []
+     for bbox, word, confidence in ocr_result:
+         words.append(word)
+         boxes.append(create_bounding_box(bbox, width_scale, height_scale))
+ 
+     encoding = processor(image, words, boxes=boxes, max_length=512, padding="max_length", truncation=True, return_tensors="pt")
+ 
+     with torch.inference_mode():
+         output = model(
+             input_ids=encoding["input_ids"].to(DEVICE),
+             attention_mask=encoding["attention_mask"].to(DEVICE),
+             bbox=encoding["bbox"].to(DEVICE),
+             pixel_values=encoding["pixel_values"].to(DEVICE),
+         )
+ 
+     logits = output.logits
+     predicted_class = logits.argmax()
+     probabilities = F.softmax(logits, dim=-1).flatten().tolist()
+ 
+     return predicted_class.cpu().item(), probabilities
+ 
+ 
+ reader = create_ocr_reader()
+ processor = create_processor()
+ model = create_model()
+ 
+ upload_file = st.file_uploader("Upload Document Image", ["jpg", "png"])
+ if upload_file is not None:
+     bytes_data = io.BytesIO(upload_file.getvalue())
+     # Convert to RGB so PNGs with an alpha channel don't break the image processor.
+     image = Image.open(bytes_data).convert("RGB")
+     st.image(image, "Your Document Image")
+ 
+     predicted_class, probabilities = predict(image, reader, processor, model)
+     print("Predicted class:", predicted_class)
+     print("Probabilities:", probabilities)
+ 
+     predicted_label = model.config.id2label[predicted_class]
+     st.markdown(f"Predicted document type: **{predicted_label}**")
+ 
+     # Bar chart of the model's confidence for each document type.
+     df_predictions = pd.DataFrame({
+         "Document": list(model.config.id2label.values()),
+         "confidence": probabilities,
+     })
+     fig = px.bar(df_predictions, x="Document", y="confidence", title="Document Type Confidence")
+     st.plotly_chart(fig, use_container_width=True)
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ easyocr==1.6.2
+ pandas==1.5.3
+ Pillow==9.4.0
+ plotly-express==0.4.1
+ torch==1.13.1
+ transformers==4.25.1