test
- images/1.png +3 -0
- images/2.png +3 -0
- images/21.png +3 -0
- images/ggg.jpg +3 -0
- images/testvideo.mp4 +3 -0
- images/ветрогенератор_Wind9.jpeg +3 -0
- images/опухоль_zlokachestvennaya-opuhol.jpg +3 -0
- main.py +4 -0
- packages.txt +3 -0
- pages/2_Brain tumor.py +100 -0
- pages/3_textcleaner.py +44 -0
- pages/Turbine.py +63 -0
- pages/__init__.py +0 -0
- requirements.txt +82 -0
- weights/__init__.py +0 -0
- weights/autoencoder.pt +3 -0
- weights/braintumor.pt +3 -0
- weights/model.py +3 -0
- weights/preprocessing.py +3 -0
- weights/turbine.pt +3 -0
images/1.png
ADDED (Git LFS)
images/2.png
ADDED (Git LFS)
images/21.png
ADDED (Git LFS)
images/ggg.jpg
ADDED (Git LFS)
images/testvideo.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bf1d40b9cd1f19e0eb57656d71df18a37f1d1f2a00e550bf6c6bd82577ccbb5
+size 18275551
images/ветрогенератор_Wind9.jpeg
ADDED (Git LFS)
images/опухоль_zlokachestvennaya-opuhol.jpg
ADDED (Git LFS)
main.py
ADDED
@@ -0,0 +1,4 @@
+import streamlit as st
+
+st.title('Проект по computer vision')
+
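Note: main.py is only the landing page. With this layout, Streamlit's multipage mechanism picks up the scripts in pages/ automatically when the app is launched with streamlit run main.py. A minimal sketch of a slightly fuller entry page under that assumption (the page_title value and the sidebar hint are illustrative, not part of this commit):

import streamlit as st

# Entry page; Streamlit lists pages/2_Brain tumor.py, pages/3_textcleaner.py and
# pages/Turbine.py in the sidebar automatically.
st.set_page_config(page_title='Computer vision project')  # illustrative title
st.title('Проект по computer vision')
st.write('Pick a page from the sidebar.')  # illustrative hint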
packages.txt
ADDED
@@ -0,0 +1,3 @@
+freeglut3-dev
+libgtk2.0-dev
+libgl1-mesa-glx
pages/2_Brain tumor.py
ADDED
@@ -0,0 +1,100 @@
+import streamlit as st
+from PIL import Image
+import torch
+from torchvision import io
+import numpy as np
+import PIL
+import os
+import matplotlib.pyplot as plt
+import requests
+from io import BytesIO
+import base64
+
+from torchvision import transforms as T
+model = torch.hub.load(
+    'ultralytics/yolov5',
+    'custom',
+    path='weights/braintumor.pt',
+)
+
+model.eval()
+model.conf = 0.3
+st.title('Brain tumor detection')
+
+st.title("Single Detection")
+uploaded_file = st.file_uploader("Загрузите фотографию", type=["png", "jpg", "jpeg"])
+
+if uploaded_file is not None:
+    image = Image.open(uploaded_file)
+    st.image(image, caption='Загруженное изображение', use_column_width=True)
+
+    if st.button("Определить наличие опухоли"):
+        image.save("ggg.jpg")
+
+        # Image
+        img = 'ggg.jpg'
+        # Inference
+        results = model(img)
+        # results.show()  # or .show(), .save(), .crop(), .pandas(), etc
+        annotated_img = results.render()[0]
+        st.image(annotated_img, caption='Результат', use_column_width=True)
+
+st.title("Multiple Detection")
+uploaded_files = st.file_uploader("Upload your images", accept_multiple_files=True)
+
+images = []
+
+if uploaded_files is not None:
+    for image in uploaded_files:
+        # Convert the uploaded file into a PIL image object
+        image = Image.open(image)
+        st.image(image, caption='Original Image', use_column_width=True)
+        images.append(image)
+
+    if st.button("Detection (Multiple)"):
+        # Apply the YOLOv5 model to each image
+        for image in images:
+
+            results = model(image)
+
+            # Display the results
+            st.image(results.render()[0], caption='Detection Result', use_column_width=True)
+
+
+st.title("Upload image by URL")
+
+# Image URL input
+url = st.text_input("Enter image URL:")
+
+
+
+# When a URL is entered, load and display the image
+if url:
+    try:
+        if url.startswith("data:image"):
+            # Handle data URI
+            image_data = url.split(',')[1]
+            image_binary = base64.b64decode(image_data)
+            image = Image.open(BytesIO(image_binary))
+        else:
+            response = requests.get(url)
+            image_bytes = response.content
+            image = Image.open(BytesIO(image_bytes))
+
+        st.image(image, caption='Uploaded image', use_column_width=True)
+
+        result = model(image)
+
+        # Display the results
+        st.image(result.render()[0], caption='Detection Result', use_column_width=True)
+
+    except Exception as e:
+        st.error("Error: " + str(e))
+
+#if st.button("Detection (URL)"):
+
+# Inference
+#result = model(image)
+
+# Display the results
+#st.image(result.render()[0], caption='Detection Result', use_column_width=True)
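Note: the YOLOv5 hub model above is created at import time, so every Streamlit rerun reloads it, and the single-detection path round-trips through a temporary ggg.jpg file. A minimal sketch of caching the load with st.cache_resource and passing the PIL image to the model directly (a suggestion, not part of this commit; the name load_detector is illustrative):

import streamlit as st
import torch

@st.cache_resource
def load_detector(weights_path='weights/braintumor.pt'):
    # Load the custom YOLOv5 checkpoint once and reuse it across reruns.
    model = torch.hub.load('ultralytics/yolov5', 'custom', path=weights_path)
    model.conf = 0.3
    return model.eval()

model = load_detector()
# results = model(pil_image)  # YOLOv5 hub models accept PIL images directly, no temp file needed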
pages/3_textcleaner.py
ADDED
@@ -0,0 +1,44 @@
+import streamlit as st
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+from PIL import Image
+from weights.model import ConvAutoencoder
+from weights.preprocessing import preprocess
+from torchvision.io import read_image
+
+@st.cache_resource
+
+def load_model():
+    model = ConvAutoencoder()
+    model.load_state_dict(torch.load('weights/autoencoder.pt', map_location='cpu'))
+    return model
+
+DEVICE = 'cpu'
+model = load_model()
+model.to(DEVICE)
+model.eval()
+
+st.title('Denoising images with noise')
+
+loaded_image = st.file_uploader('Загрузите картинку с текстом')
+
+def predict(img):
+    img = preprocess(img)
+    img = img.to(DEVICE)
+    outputs = model(img.unsqueeze(0))
+    pred = outputs.detach().cpu().squeeze(0).numpy()
+    return pred
+
+if loaded_image:
+    img = Image.open(loaded_image)
+    prediction = predict(img)
+    left_col, right_col = st.columns(2)
+    with left_col:
+        st.write('Original text')
+        st.image(img)
+    with right_col:
+        st.write('Denoised text')
+        st.image(prediction[0])
+
+
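Note: weights/preprocessing.py is stored as a Git LFS pointer in this commit, so the preprocess used above is not visible here. A plausible minimal sketch of such a function for a text-denoising autoencoder; the grayscale conversion and the 256x256 input size are assumptions, not taken from the repository:

import torch
from PIL import Image
from torchvision import transforms as T

def preprocess(img: Image.Image) -> torch.Tensor:
    # Hypothetical pipeline; the real weights/preprocessing.py may differ.
    pipeline = T.Compose([
        T.Grayscale(num_output_channels=1),  # treat scanned text as single-channel (assumption)
        T.Resize((256, 256)),                # fixed input size (assumption)
        T.ToTensor(),                        # PIL image -> float tensor in [0, 1], shape (1, 256, 256)
    ])
    return pipeline(img)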
pages/Turbine.py
ADDED
@@ -0,0 +1,63 @@
+from ultralytics import YOLO
+import streamlit as st
+from PIL import Image
+import io
+import os
+import torch
+import torchvision.transforms as transforms
+import requests
+from torchvision import transforms as T
+import base64
+import numpy as np
+
+@st.cache_resource
+
+def load_model(weights_path):
+    model = YOLO(weights_path)
+    return model
+
+model = load_model('weights/turbine.pt')
+model = model.cpu()
+
+st.title('Turbine and cable tower detection')
+
+
+
+image_type = st.radio("Способ загрузки", ["Олд", "Ньюфаг", "Тест"])
+
+if image_type == "Олд":
+    # image = st.file_uploader('Загрузи файл', type=['jpg', 'jpeg', 'png'])
+    uploaded_images = st.file_uploader("Загрузите изображения", type=["jpg", "png"], accept_multiple_files=True)
+    if uploaded_images is not None:
+        for image in uploaded_images:
+            image = Image.open(image)
+            # image_bytes = image.read()
+            # image = Image.open(io.BytesIO(image_bytes))
+            results = model.predict(image)
+            result = results[0]
+            img = Image.fromarray(result.plot()[:, :, ::-1])
+            st.image(img)
+
+
+if image_type == "Ньюфаг":
+    image_url = st.text_input("Введите URL изображения для загрузки")
+    if image_url:
+        if image_url.startswith("data:image"):
+            # Handle data URI
+            image_data = image_url.split(',')[1]
+            image_binary = base64.b64decode(image_data)
+            image = Image.open(io.BytesIO(image_binary))
+        else:
+            response = requests.get(image_url)
+            image_bytes = response.content
+            image = Image.open(io.BytesIO(image_bytes))
+        results = model.predict(image)
+        result = results[0]
+        img = Image.fromarray(result.plot()[:, :, ::-1])
+        st.image(img)
+if image_type == "Тест":
+    video_file = open('images/testvideo.mp4', 'rb')
+    video_bytes = video_file.read()
+
+    st.video(video_bytes)
+
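Note: the "Тест" (test) branch above only plays the raw clip with st.video; the detector is not applied to it. A minimal sketch of running the same YOLO model over the test video frame by frame with OpenCV (a suggestion, not part of this commit; opencv-python is already listed in requirements.txt):

import cv2
import streamlit as st
from ultralytics import YOLO

model = YOLO('weights/turbine.pt')
cap = cv2.VideoCapture('images/testvideo.mp4')
frame_slot = st.empty()                      # one placeholder, updated per frame

while cap.isOpened():
    ok, frame = cap.read()                   # frame is a BGR numpy array
    if not ok:
        break
    result = model.predict(frame, verbose=False)[0]
    frame_slot.image(result.plot()[:, :, ::-1])  # plot() returns BGR; flip to RGB for st.image
cap.release()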
pages/__init__.py
ADDED
(empty file)
requirements.txt
ADDED
@@ -0,0 +1,82 @@
+altair==5.1.2
+attrs==23.1.0
+blinker==1.6.3
+cachetools==5.3.2
+certifi==2023.7.22
+charset-normalizer==3.3.1
+click==8.1.7
+contourpy==1.1.1
+cycler==0.12.1
+filelock==3.12.4
+fonttools==4.43.1
+fsspec==2023.10.0
+gitdb==4.0.11
+GitPython==3.1.40
+idna==3.4
+importlib-metadata==6.8.0
+Jinja2==3.1.2
+jsonschema==4.19.1
+jsonschema-specifications==2023.7.1
+kiwisolver==1.4.5
+markdown-it-py==3.0.0
+MarkupSafe==2.1.3
+matplotlib==3.8.0
+mdurl==0.1.2
+mpmath==1.3.0
+networkx==3.2
+numpy==1.26.1
+nvidia-cublas-cu12==12.1.3.1
+nvidia-cuda-cupti-cu12==12.1.105
+nvidia-cuda-nvrtc-cu12==12.1.105
+nvidia-cuda-runtime-cu12==12.1.105
+nvidia-cudnn-cu12==8.9.2.26
+nvidia-cufft-cu12==11.0.2.54
+nvidia-curand-cu12==10.3.2.106
+nvidia-cusolver-cu12==11.4.5.107
+nvidia-cusparse-cu12==12.1.0.106
+nvidia-nccl-cu12==2.18.1
+nvidia-nvjitlink-cu12==12.3.52
+nvidia-nvtx-cu12==12.1.105
+opencv-python==4.8.1.78
+opencv-python-headless==4.8.1.78
+packaging==23.2
+pandas==2.1.1
+Pillow==10.1.0
+protobuf==4.24.4
+psutil==5.9.6
+py-cpuinfo==9.0.0
+pyarrow==13.0.0
+pydeck==0.8.1b0
+Pygments==2.16.1
+pyparsing==3.1.1
+python-dateutil==2.8.2
+pytz==2023.3.post1
+PyYAML==6.0.1
+referencing==0.30.2
+requests==2.31.0
+rich==13.6.0
+rpds-py==0.10.6
+scipy==1.11.3
+seaborn==0.13.0
+six==1.16.0
+smmap==5.0.1
+streamlit==1.27.2
+sympy==1.12
+tenacity==8.2.3
+thop==0.1.1.post2209072238
+toml==0.10.2
+toolz==0.12.0
+torch==2.1.0
+torchutils==0.0.4
+torchvision==0.16.0
+tornado==6.3.3
+tqdm==4.66.1
+triton==2.1.0
+typing_extensions==4.8.0
+tzdata==2023.3
+tzlocal==5.2
+ultralytics==8.0.201
+urllib3==2.0.7
+validators==0.22.0
+watchdog==3.0.0
+zipp==3.17.0
weights/__init__.py
ADDED
(empty file)
weights/autoencoder.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d348b8308b431efefb3aa6320d328d66e5fa83bc807902c26420732ee65bfbf2
+size 174416
weights/braintumor.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86c06c82deb20ca5b735d068ac02f5303f8d356febfe33641d2413efd2214be0
+size 14443752
weights/model.py
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:837c9232cadb151a3b12260841da0ace793a6bff16b37e192cadf91d980c93b4
+size 2231
weights/preprocessing.py
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ef682ff4bf4554c52786ff31fc044020280b8d95dbc9a5feb39315137182736
+size 575
weights/turbine.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd0318c891e83f984858570ae9344f78b21a555f68321bae1161ceeff7bca0ab
+size 6257113