QandeelFatima committed on
Commit
3c5586c
·
1 Parent(s): 1fcec91

files added

Files changed (3)
  1. Makefile +27 -0
  2. app.py +67 -0
  3. requirements.txt +6 -0
Makefile ADDED
@@ -0,0 +1,27 @@
+ install:
+ 	pip install --upgrade pip &&\
+ 		pip install -r requirements.txt
+
+ test:
+ 	python -m pytest -vvv --cov=hello --cov=greeting \
+ 		--cov=smath --cov=web tests
+ 	python -m pytest --nbval notebook.ipynb # tests our Jupyter notebook
+ 	#python -m pytest -v tests/test_web.py # if you just want to test web
+
+ debug:
+ 	python -m pytest -vv --pdb # invoke the debugger on failure
+
+ one-test:
+ 	python -m pytest -vv tests/test_greeting.py::test_my_name4
+
+ debugthree:
+ 	# stop after the first three failures, dropping into PDB each time
+ 	python -m pytest -vv --pdb --maxfail=3
+
+ format:
+ 	black *.py
+
+ lint:
+ 	pylint --disable=R,C *.py
+
+ all: install lint test format
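
Note: the test and one-test targets above reference modules (hello, greeting, smath, web) and a tests/ folder that are not part of this commit. Purely as illustration, a minimal sketch of what tests/test_greeting.py might contain, assuming a hypothetical greeting.greet(name) helper (the function name and its behavior are assumptions, not the repo's confirmed API):

# tests/test_greeting.py -- hypothetical sketch; the real tests are not in this commit
# greeting.greet(name) is an assumed helper that returns a greeting containing the name
from greeting import greet

def test_my_name4():
    # the Makefile's one-test target runs exactly this test
    assert "Qandeel" in greet("Qandeel")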
app.py ADDED
@@ -0,0 +1,67 @@
+ import os
+ os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'  # must be set before TensorFlow is imported to take effect
+
+ import torch
+ import gradio as gr
+ import tensorflow as tf
+ from PIL import Image
+ from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
+
+ device = 'cpu'
+
+ model_id = "nttdataspain/vit-gpt2-stablediffusion2-lora"
+ model = VisionEncoderDecoderModel.from_pretrained(model_id).to(device)
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ feature_extractor = ViTFeatureExtractor.from_pretrained(model_id)
+
+ # Generate a caption for a single PIL image
+ def predict(image):
+     img = image.convert('RGB')
+     model.eval()
+     pixel_values = feature_extractor(images=[img], return_tensors="pt").pixel_values
+     with torch.no_grad():
+         output_ids = model.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True).sequences
+
+     preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
+     preds = [pred.strip() for pred in preds]
+     return preds[0]
+
+ examples_folder = os.path.join(os.path.dirname(__file__), "examples")
+ examples = [os.path.join(examples_folder, file) for file in os.listdir(examples_folder)]
+
+ with gr.Blocks() as demo:
+
+     gr.HTML(
+         """
+         <div style="text-align: center; max-width: 1200px; margin: 20px auto;">
+         <h2 style="font-weight: 900; font-size: 3rem; margin: 0rem">
+             📸 ViT Image-to-Text with LoRA 📝
+         </h2>
+         <h2 style="text-align: left; font-weight: 450; font-size: 1rem; margin-top: 2rem; margin-bottom: 1.5rem">
+         Fine-tuning large language models for a specific task has long been costly. Microsoft's <b>Low-Rank Adaptation (LoRA)</b> addresses this: with models like GPT-3 carrying billions of parameters, full fine-tuning is exorbitant, so LoRA freezes the pretrained weights and trains only small low-rank update matrices.
+         <br>
+         <br>
+         You can find more info here: <u><a href="https://medium.com/@daniel.puenteviejo/fine-tuning-image-to-text-algorithms-with-lora-deb22aa7da27" target="_blank">Medium article</a></u>
+         </h2>
+         </div>
+         """)
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             img = gr.Image(label="Upload any Image", type='pil')
+             button = gr.Button(value="Describe")
+         with gr.Column(scale=1):
+             out = gr.Textbox(label="Captions")
+
+     button.click(predict, inputs=[img], outputs=[out])
+
+     gr.Examples(
+         examples=examples,
+         inputs=img,
+         outputs=out,
+         fn=predict,
+         cache_examples=True,
+     )
+
+ if __name__ == "__main__":
+     demo.launch(debug=True)
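
A quick way to smoke-test predict without opening the browser UI is to call it directly on a local file; a minimal sketch, assuming at least one image sits in the examples/ folder (the filename below is hypothetical):

# smoke_test.py -- illustrative only, not part of the commit
# assumes examples/dog.jpg exists; any RGB image file works
from PIL import Image
from app import predict  # safe to import: demo.launch() sits behind the __main__ guard

image = Image.open("examples/dog.jpg")
print(predict(image))  # prints the generated caption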
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ gradio
+ transformers
+ pillow
+ requests
+ torch
+ tensorflow