from io import BytesIO

from fastapi import FastAPI, File, Form
from PIL import Image
from transformers import pipeline


description = """
## DocQA with 🤗 transformers, FastAPI, and Docker

This app shows how to do Document Question Answering using
FastAPI in a Docker Space 🚀
Check out the docs for the `/predict` endpoint below to try it out!
"""

# NOTE - we configure docs_url to serve the interactive Docs at the root path
# of the app. This way, we can use the docs as a landing page for the app on Spaces.
app = FastAPI(docs_url="/", description=description)

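# Load the document-question-answering pipeline once at startup so the model
# weights are fetched from the Hub (on first run) before any request is served.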
pipe = pipeline("document-question-answering", model="impira/layoutlm-document-qa")


@app.post("/predict")
def predict(image_file: bytes = File(...), question: str = Form(...)):
    """
    Using the document-question-answering pipeline from `transformers`, take
    a given input document (image) and a question about it, and return the
    predicted answer. The model used is available on the hub at:
    [`impira/layoutlm-document-qa`](https://huggingface.co/impira/layoutlm-document-qa).
    """
    # Decode the uploaded bytes into a PIL image for the pipeline.
    image = Image.open(BytesIO(image_file))
    # The pipeline returns a list of answer candidates (each with `answer`,
    # `score`, `start`, and `end`), which FastAPI serializes to JSON.
    output = pipe(image, question)
    return output
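

# Example request (a sketch, not part of the app itself). Assuming the server is
# started with `uvicorn main:app --host 0.0.0.0 --port 7860` (7860 is the port
# Docker Spaces expect), the endpoint can be exercised with curl; the file name
# and question below are placeholders:
#
#   curl -X POST "http://localhost:7860/predict" \
#        -F "image_file=@document.png" \
#        -F "question=What is the invoice number?"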