Spaces:
Sleeping
Sleeping
khaledeng15
committed on
Commit
·
e3c1dfe
1
Parent(s):
4189377
init
Browse files
- CMD.md +1 -0
- main_page.py +28 -0
- pages/blip-image-captioning.py +16 -0
- pages/noon.py +25 -0
- pages/text-genration.py +0 -0
- requirements.txt +3 -0
CMD.md
ADDED
@@ -0,0 +1 @@
streamlit run main_page.py
main_page.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# main_page.py — entry point for the multi-page Streamlit demo app.
# Launch with: streamlit run main_page.py
import streamlit as st

# Browser tab title/icon; must be the first Streamlit call in the script.
st.set_page_config(
    page_title="Hello",
    page_icon="👋",
)

st.write("# Welcome to Streamlit! 👋")

st.sidebar.success("Select a demo above.")

# Landing-page copy rendered as Markdown (links open the Streamlit docs/demos).
st.markdown(
    """
    Streamlit is an open-source app framework built specifically for
    Machine Learning and Data Science projects.
    **👈 Select a demo from the sidebar** to see some examples
    of what Streamlit can do!
    ### Want to learn more?
    - Check out [streamlit.io](https://streamlit.io)
    - Jump into our [documentation](https://docs.streamlit.io)
    - Ask a question in our [community
        forums](https://discuss.streamlit.io)
    ### See more complex demos
    - Use a neural net to [analyze the Udacity Self-driving Car Image
        Dataset](https://github.com/streamlit/demo-self-driving)
    - Explore a [New York City rideshare dataset](https://github.com/streamlit/demo-uber-nyc-pickups)
    """
)
pages/blip-image-captioning.py
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# pages/blip-image-captioning.py — Gradio demo for BLIP image captioning.
import gradio as gr
from transformers import pipeline

# Captioning pipeline; model weights are downloaded on first use.
pipe = pipeline("image-to-text",
                model="Salesforce/blip-image-captioning-base")


def launch(image):
    """Return a caption string for *image*.

    Parameter renamed from ``input`` to avoid shadowing the builtin.
    The Gradio input component is ``type='pil'``, so *image* is
    presumably a PIL.Image — confirm against the transformers pipeline docs.
    """
    out = pipe(image)
    # The pipeline returns a list of result dicts; use the first caption.
    return out[0]['generated_text']


iface = gr.Interface(launch,
                     inputs=gr.Image(type='pil'),
                     outputs="text")

iface.launch()
pages/noon.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# pages/noon.py — text generation with the Naseej/noon-7b (BLOOM-based) model.
# NOTE(review): the entire page is currently disabled (commented out) —
# presumably because loading a 7B-parameter model is too heavy for this
# Space; confirm before re-enabling.
# from transformers import BloomTokenizerFast, BloomForCausalLM, pipeline


# The Arabic prompt asks the model to write a short article about
# artificial intelligence and its developments.
# text="اكتب مقالا من عدة أسطر عن الذكاء الصناعي وتطوراته"
# prompt = f'Instruction:\n{text}\n\nResponse:'

# model = BloomForCausalLM.from_pretrained('Naseej/noon-7b')

# tokenizer = BloomTokenizerFast.from_pretrained('Naseej/noon-7b')

# generation_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)

# # We recommend the provided hyperparameters for generation
# # But encourage you to try different values
# response = generation_pipeline(prompt,
#                                pad_token_id=tokenizer.eos_token_id,
#                                do_sample=False,
#                                num_beams=4,
#                                max_length=500,
#                                top_p=0.1,
#                                top_k=20,
#                                repetition_penalty = 3.0,
#                                no_repeat_ngram_size=3)[0]['generated_text']

# print(response)
pages/text-genration.py
ADDED
File without changes
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
transformers
torch
gradio