khaledeng15 committed
Commit 98ed57d · Parent(s): a827854
add files
Files changed:
- app.py (+10 -16)
- helper/__pycache__/image_helper.cpython-312.pyc (+0 -0)
- helper/image_helper.py (+6 -0)
- pages/blip-image-captioning.py (+21 -4)
- pages/noon.py (+21 -18)
app.py
CHANGED
@@ -5,24 +5,18 @@ st.set_page_config(
     page_icon="👋",
 )
 
-st.write("# Welcome to
+st.write("# Welcome to Khaled Space! 👋")
 
 
 st.markdown(
     """
-    forums](https://discuss.streamlit.io)
-    ### See more complex demos
-    - Use a neural net to [analyze the Udacity Self-driving Car Image
-      Dataset](https://github.com/streamlit/demo-self-driving)
-    - Explore a [New York City rideshare dataset](https://github.com/streamlit/demo-uber-nyc-pickups)
+    Welcome to **Khaled's AI Learning Hub**! 🚀
+
+    This platform is dedicated to showcasing AI development projects, all designed to help you explore and understand the power of artificial intelligence. 🤖💡
+
+    **👈 Select a project from the sidebar** to see hands-on examples ranging from data processing to model deployment. Each project page will guide you through different aspects of AI development, helping you gain practical insights.
+
     """
 )
helper/__pycache__/image_helper.cpython-312.pyc
ADDED
Binary file (509 Bytes).
helper/image_helper.py
ADDED
@@ -0,0 +1,6 @@
+import base64
+
+def to_base64(uploaded_file):
+    file_buffer = uploaded_file.read()
+    b64 = base64.b64encode(file_buffer).decode()
+    return f"data:image/png;base64,{b64}"
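The new helper reads an uploaded file object and wraps its bytes in a base64 data URL. A minimal usage sketch, assuming a local PNG opened in binary mode stands in for the Streamlit upload (the filename is hypothetical); note the helper hard-codes the image/png MIME type, so non-PNG uploads still get a PNG prefix:

    from helper.image_helper import to_base64

    # Any object exposing .read() that returns the raw image bytes works here,
    # e.g. the value returned by st.file_uploader or a plain binary file handle.
    with open("example.png", "rb") as f:   # hypothetical local file
        data_url = to_base64(f)

    print(data_url[:30])                   # -> "data:image/png;base64,..."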
pages/blip-image-captioning.py
CHANGED
@@ -1,22 +1,39 @@
-# import gradio as gr
 import streamlit as st
 from io import StringIO
+from PIL import Image
+import numpy as np
+
 
 from transformers import pipeline
 
+from helper.image_helper import to_base64
+
 pipe = pipeline("image-to-text",
                 model="Salesforce/blip-image-captioning-base")
 
 def process_file():
-
-
+    stringio = StringIO(uploaded_file.getvalue().decode("utf-8"))
+    txt = launch(stringio)
+    st.write(txt)
+
 def launch(input):
     out = pipe(input)
     return out[0]['generated_text']
 
-uploaded_file = st.file_uploader("Choose a file", on_change=process_file)
+# uploaded_file = st.file_uploader("Choose a file", on_change=process_file)
+
+uploaded_file = st.file_uploader("Choose a file")
+if uploaded_file is not None:
+    # st.image(uploaded_file)
+    image = Image.open(uploaded_file)
+
+    # bytes_data = uploaded_file.getvalue()
+    base64 = to_base64(uploaded_file)
+    st.image(base64)
+
+    txt = launch(base64)
+    st.write(txt)
 
 # iface = gr.Interface(launch,
 #                      inputs=gr.Image(type='pil'),
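A side note, not part of the commit: the transformers image-to-text pipeline also accepts a PIL.Image directly, so the page could caption the already-opened image without the base64 round trip, and wrapping the pipeline construction in st.cache_resource keeps the BLIP weights from being reloaded on every Streamlit rerun. A minimal sketch under those assumptions:

    import streamlit as st
    from PIL import Image
    from transformers import pipeline

    @st.cache_resource                     # build the captioning pipeline once per process
    def get_pipe():
        return pipeline("image-to-text",
                        model="Salesforce/blip-image-captioning-base")

    uploaded_file = st.file_uploader("Choose a file")
    if uploaded_file is not None:
        image = Image.open(uploaded_file)  # PIL image straight from the upload widget
        st.image(image)
        caption = get_pipe()(image)[0]["generated_text"]
        st.write(caption)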
pages/noon.py
CHANGED
@@ -1,25 +1,28 @@
-
+import streamlit as st
 
+from transformers import BloomTokenizerFast, BloomForCausalLM, pipeline
 
-# text="اكتب مقالا من عدة أسطر عن الذكاء الصناعي وتطوراته"
-# prompt = f'Instruction:\n{text}\n\nResponse:'
+text="اكتب مقالا من عدة أسطر عن الذكاء الصناعي وتطوراته"
+prompt = f'Instruction:\n{text}\n\nResponse:'
 
-
+model = BloomForCausalLM.from_pretrained('Naseej/noon-7b')
 
-
+tokenizer = BloomTokenizerFast.from_pretrained('Naseej/noon-7b')
 
-
+generation_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
-
-# # But encourage you to try different values
-# response = generation_pipeline(prompt,
-#                                pad_token_id=tokenizer.eos_token_id,
-#                                do_sample=False,
-#                                num_beams=4,
-#                                max_length=500,
-#                                top_p=0.1,
-#                                top_k=20,
-#                                repetition_penalty = 3.0,
-#                                no_repeat_ngram_size=3)[0]['generated_text']
+# We recommend the provided hyperparameters for generation
+# But encourage you to try different values
+response = generation_pipeline(prompt,
+                               pad_token_id=tokenizer.eos_token_id,
+                               do_sample=False,
+                               num_beams=4,
+                               max_length=500,
+                               top_p=0.1,
+                               top_k=20,
+                               repetition_penalty = 3.0,
+                               no_repeat_ngram_size=3)[0]['generated_text']
 
-#
+
+# print(response)
+st.write(response)