Setup repo + demo
- .dockerignore +0 -0
- Dockerfile +74 -0
- LICENSE.md +21 -0
- demo/README.md +31 -0
- demo/app.py +18 -0
- demo/requirements.txt +2 -0
- demo/src/__init__.py +0 -0
- demo/src/compute.py +97 -0
- demo/src/convert.py +24 -0
- demo/src/gui.py +103 -0
- demo/src/utils.py +38 -0
- setup.cfg +14 -0
.dockerignore
ADDED
File without changes
Dockerfile
ADDED
@@ -0,0 +1,74 @@
# read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile

# creates a virtual Ubuntu environment in the Docker image
FROM ubuntu:22.04

# set language and locale
ENV LANG=C.UTF-8 LC_ALL=C.UTF-8

# NOTE: the -y flag automatically answers yes to all prompts
# installing python3 with a specific version
RUN apt-get update -y
RUN apt-get upgrade -y
RUN apt install software-properties-common -y
RUN add-apt-repository ppa:deadsnakes/ppa -y
RUN apt update
RUN apt install python3.7 -y
RUN apt install python3.7-distutils -y
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.7 1

# installing other libraries
RUN apt-get install python3-pip -y && \
    apt-get -y install sudo
RUN apt-get install curl -y
RUN apt-get install nano -y
RUN apt-get update && apt-get install -y git
RUN apt-get install libblas-dev -y && apt-get install liblapack-dev -y
RUN apt-get install gfortran -y
RUN apt-get install libpng-dev -y
RUN apt-get install python3-dev -y
# RUN apt-get -y install cmake curl

WORKDIR /code

# install dependencies
COPY ./requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# resolve dependency collision between tf==2.4 and gradio
RUN pip install --force-reinstall typing_extensions==4.7.1

# Install wget and unzip
RUN apt install wget -y && \
    apt install unzip -y

# Set up a new user named "user" with user ID 1000
RUN useradd -m -u 1000 user

# Switch to the "user" user
USER user

# Set home to the user's home directory
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Set the working directory to the user's home directory
WORKDIR $HOME/app

# Copy the current directory contents into the container at $HOME/app, setting the owner to the user
COPY --chown=user . $HOME/app

# Download pretrained models
RUN wget "https://github.com/raidionics/Raidionics-models/releases/download/1.2.0/Raidionics-CT_Airways-ONNX-v12.zip" && \
    unzip "Raidionics-CT_Airways-ONNX-v12.zip" && mkdir -p resources/models/ && mv CT_Airways/ resources/models/CT_Airways/
RUN wget "https://github.com/raidionics/Raidionics-models/releases/download/1.2.0/Raidionics-CT_Lungs-ONNX-v12.zip" && \
    unzip "Raidionics-CT_Lungs-ONNX-v12.zip" && mv CT_Lungs/ resources/models/CT_Lungs/

RUN rm -r *.zip

# Download test sample
RUN wget "https://github.com/andreped/neukit/releases/download/test-data/test_thorax_CT.nii.gz"

# CMD ["/bin/bash"]
CMD ["python3", "app.py"]
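The two wget/unzip steps above bake the pretrained ONNX models into the image under `resources/models/`. As a rough sanity check (a hypothetical helper, not part of this commit; the folder names are taken from the `mv` targets above), the baked-in models could be verified from inside the container like this:

```python
# hypothetical sanity check for the baked-in model folders (not part of this commit)
import os

expected = ["resources/models/CT_Airways", "resources/models/CT_Lungs"]
for path in expected:
    print(f"{path}: {'OK' if os.path.isdir(path) else 'MISSING'}")
```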
LICENSE.md
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 André Pedersen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
demo/README.md
ADDED
@@ -0,0 +1,31 @@
# Hugging Face demo - through docker SDK

Deploying simple models in a gradio-based web interface in Hugging Face spaces is easy.
For any other custom pipeline, with various dependencies and challenging behaviour, it
might be necessary to use Docker containers instead.

For every new push to the main branch, continuous deployment to the Hugging Face
`AeroPath` space is performed through a GitHub Actions workflow.

When the space is updated, the Docker image is rebuilt/updated (using the cache when possible).
When the build has finished, end users can test the app as they please.

Right now, the functionality of the app is extremely limited, offering only a widget
for uploading a NIfTI file (`.nii` or `.nii.gz`) and visualizing the surface of the
predicted lung tumor volume once processing has finished.

The analysis process can be monitored from the `Logs` tab next to the `Running` button
in the Hugging Face `AeroPath` space.

It is also possible to build the app as a Docker image and deploy it yourself. To do so, follow these steps
(note that Docker image names must be lowercase):

```
docker build -t aeropath:latest ..
docker run -it -p 7860:7860 aeropath:latest
```

Then open `http://localhost:7860` in your favourite web browser to view the demo.

TODOs:
- [X] Add gallery widget to enable scrolling through 2D slices
- [X] Render segmentation for individual 2D slices as overlays
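For reference, a minimal sketch of what the deployment step could look like from Python, assuming the `huggingface_hub` client, an `HF_TOKEN` secret, and the space id `andreped/AeroPath` (none of these are specified in this commit):

```python
# hypothetical deployment helper: mirror the repository to the Hugging Face space
import os

from huggingface_hub import HfApi

api = HfApi(token=os.environ["HF_TOKEN"])  # assumes the token is exposed as a secret
api.upload_folder(
    folder_path=".",              # repository root
    repo_id="andreped/AeroPath",  # assumed space id
    repo_type="space",
)
```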
demo/app.py
ADDED
@@ -0,0 +1,18 @@
from src.gui import WebUI


def main():
    print("Launching demo...")

    # cwd = "/Users/andreped/workspace/AeroPath/"  # local testing -> macOS
    cwd = "/home/user/app/"  # production -> docker

    class_name = "tumor"

    # initialize and run app
    app = WebUI(class_name=class_name, cwd=cwd)
    app.run()


if __name__ == "__main__":
    main()
demo/requirements.txt
ADDED
@@ -0,0 +1,2 @@
lungtumormask @ git+https://github.com/vemundfredriksen/LungTumorMask.git
gradio==3.44.4
demo/src/__init__.py
ADDED
File without changes
demo/src/compute.py
ADDED
@@ -0,0 +1,97 @@
import configparser
import logging
import os
import shutil


def run_model(
    input_path: str,
    model_path: str,
    verbose: str = "info",
    task: str = "MRI_Meningioma",
    name: str = "Tumor",
):
    logging.basicConfig()
    logging.getLogger().setLevel(logging.WARNING)

    if verbose == "debug":
        logging.getLogger().setLevel(logging.DEBUG)
    elif verbose == "info":
        logging.getLogger().setLevel(logging.INFO)
    elif verbose == "error":
        logging.getLogger().setLevel(logging.ERROR)
    else:
        raise ValueError("Unsupported verbose value provided:", verbose)

    # delete patient/result folders if they exist
    if os.path.exists("./patient/"):
        shutil.rmtree("./patient/")
    if os.path.exists("./result/"):
        shutil.rmtree("./result/")

    # defined up-front so the clean-up below works even if the try block fails early
    patient_directory = ""
    output_path = ""

    try:
        # set up temporary patient directory
        filename = input_path.split("/")[-1]
        splits = filename.split(".")
        extension = ".".join(splits[1:])
        patient_directory = "./patient/"
        os.makedirs(patient_directory + "T0/", exist_ok=True)
        shutil.copy(
            input_path,
            patient_directory + "T0/" + splits[0] + "-t1gd." + extension,
        )

        # define output directory to save results
        output_path = "./result/prediction-" + splits[0] + "/"
        os.makedirs(output_path, exist_ok=True)

        # set up the configuration file
        rads_config = configparser.ConfigParser()
        rads_config.add_section("Default")
        rads_config.set("Default", "task", "neuro_diagnosis")
        rads_config.set("Default", "caller", "")
        rads_config.add_section("System")
        rads_config.set("System", "gpu_id", "-1")
        rads_config.set("System", "input_folder", patient_directory)
        rads_config.set("System", "output_folder", output_path)
        rads_config.set("System", "model_folder", model_path)
        rads_config.set(
            "System",
            "pipeline_filename",
            os.path.join(model_path, task, "pipeline.json"),
        )
        rads_config.add_section("Runtime")
        rads_config.set(
            "Runtime", "reconstruction_method", "thresholding"
        )  # thresholding, probabilities
        rads_config.set("Runtime", "reconstruction_order", "resample_first")
        rads_config.set("Runtime", "use_preprocessed_data", "False")

        with open("rads_config.ini", "w") as f:
            rads_config.write(f)

        # finally, run inference
        from raidionicsrads.compute import run_rads

        run_rads(config_filename="rads_config.ini")

        # rename and move final result
        os.rename(
            "./result/prediction-"
            + splits[0]
            + "/T0/"
            + splits[0]
            + "-t1gd_annotation-"
            + name
            + ".nii.gz",
            "./prediction.nii.gz",
        )

    except Exception as e:
        print(e)

    # clean up temporary directories
    if os.path.exists(patient_directory):
        shutil.rmtree(patient_directory)
    if os.path.exists(output_path):
        shutil.rmtree(output_path)
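A minimal usage sketch of `run_model` (the paths below are placeholders: the test sample and `resources/models/` folder come from the Dockerfile above, and the output location matches the rename at the end of the function):

```python
# hypothetical invocation of run_model; paths are placeholders
from src.compute import run_model

run_model(
    input_path="./test_thorax_CT.nii.gz",  # test sample downloaded in the Dockerfile
    model_path="./resources/models/",      # assumed model folder from the Dockerfile
    verbose="info",
)
# on success, the renamed segmentation ends up at ./prediction.nii.gz
```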
demo/src/convert.py
ADDED
@@ -0,0 +1,24 @@
import nibabel as nib
from nibabel.processing import resample_to_output
from skimage.measure import marching_cubes


def nifti_to_glb(path, output="prediction.obj"):
    # load NIfTI into numpy array, resampled to isotropic 1 mm spacing
    image = nib.load(path)
    resampled = resample_to_output(image, [1, 1, 1], order=1)
    data = resampled.get_fdata().astype("uint8")

    # extract surface
    verts, faces, normals, values = marching_cubes(data, 0)
    faces += 1  # OBJ indices are 1-based

    # write the mesh as a Wavefront OBJ file (vertices, normals, faces)
    with open(output, "w") as thefile:
        for item in verts:
            thefile.write("v {0} {1} {2}\n".format(item[0], item[1], item[2]))

        for item in normals:
            thefile.write("vn {0} {1} {2}\n".format(item[0], item[1], item[2]))

        for item in faces:
            thefile.write("f {0}//{0} {1}//{1} {2}//{2}\n".format(item[0], item[1], item[2]))
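Despite its name, `nifti_to_glb` writes a Wavefront OBJ file, which the `gr.Model3D` widget can render directly. A minimal usage sketch, with the file names used elsewhere in this demo:

```python
# hypothetical standalone use of the converter
from src.convert import nifti_to_glb

# turn the binary segmentation into a surface mesh for the 3D viewer
nifti_to_glb("./prediction.nii.gz", output="prediction.obj")
```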
demo/src/gui.py
ADDED
@@ -0,0 +1,103 @@
import gradio as gr
from .utils import load_ct_to_numpy, load_pred_volume_to_numpy
from .compute import run_model
from .convert import nifti_to_glb


class WebUI:
    def __init__(self, class_name: str = None, cwd: str = None):
        # global states
        self.images = []
        self.pred_images = []

        # @TODO: This should be dynamically set based on chosen volume size
        self.nb_slider_items = 300

        self.class_name = class_name
        self.cwd = cwd

        # define widgets not to be rendered immediately, but later on
        self.slider = gr.Slider(1, self.nb_slider_items, value=1, step=1, label="Which 2D slice to show")
        self.volume_renderer = gr.Model3D(
            clear_color=[0.0, 0.0, 0.0, 0.0],
            label="3D Model",
            visible=True,
            elem_id="model-3d",
        ).style(height=512)

    def combine_ct_and_seg(self, img, pred):
        return (img, [(pred, self.class_name)])

    def upload_file(self, file):
        return file.name

    def load_mesh(self, mesh_file_name):
        path = mesh_file_name.name
        run_model(path)
        nifti_to_glb("./prediction.nii.gz")
        self.images = load_ct_to_numpy(path)
        self.pred_images = load_pred_volume_to_numpy("./prediction.nii.gz")
        self.slider = self.slider.update(value=2)
        return "./prediction.obj"

    def get_img_pred_pair(self, k):
        k = int(k) - 1
        out = [gr.AnnotatedImage.update(visible=False)] * self.nb_slider_items
        out[k] = gr.AnnotatedImage.update(self.combine_ct_and_seg(self.images[k], self.pred_images[k]), visible=True)
        return out

    def run(self):
        css = """
        #model-3d {
            height: 512px;
        }
        #model-2d {
            height: 512px;
            margin: auto;
        }
        """
        with gr.Blocks(css=css) as demo:
            with gr.Row():
                file_output = gr.File(
                    file_types=[".nii", ".nii.gz"],
                    file_count="single"
                ).style(full_width=False, size="sm")
                file_output.upload(self.upload_file, file_output, file_output)

                run_btn = gr.Button("Run analysis").style(full_width=False, size="sm")
                run_btn.click(
                    fn=lambda x: self.load_mesh(x),
                    inputs=file_output,
                    outputs=self.volume_renderer
                )

            with gr.Row():
                gr.Examples(
                    examples=[self.cwd + "lung_001.nii.gz"],
                    inputs=file_output,
                    outputs=file_output,
                    fn=self.upload_file,
                    cache_examples=True,
                )

            with gr.Row():
                with gr.Box():
                    image_boxes = []
                    for i in range(self.nb_slider_items):
                        visibility = True if i == 1 else False
                        t = gr.AnnotatedImage(visible=visibility, elem_id="model-2d") \
                            .style(color_map={self.class_name: "#ffae00"}, height=512, width=512)
                        image_boxes.append(t)

                    self.slider.change(self.get_img_pred_pair, self.slider, image_boxes)

                with gr.Box():
                    self.volume_renderer.render()

            with gr.Row():
                self.slider.render()

            # sharing app publicly -> share=True: https://gradio.app/sharing-your-app/
            # inference times > 60 seconds -> need queue(): https://github.com/tloen/alpaca-lora/issues/60#issuecomment-1510006062
            demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)
demo/src/utils.py
ADDED
@@ -0,0 +1,38 @@
import nibabel as nib
import numpy as np


def load_ct_to_numpy(data_path):
    if type(data_path) != str:
        data_path = data_path.name

    image = nib.load(data_path)
    data = image.get_fdata()

    data = np.rot90(data, k=1, axes=(0, 1))

    # clip intensities to the [-1024, 1024] HU range
    data[data < -1024] = -1024
    data[data > 1024] = 1024

    # normalize to [0, 255] and cast to uint8 for rendering
    data = data - np.amin(data)
    data = data / np.amax(data) * 255
    data = data.astype("uint8")

    print(data.shape)
    return [data[..., i] for i in range(data.shape[-1])]


def load_pred_volume_to_numpy(data_path):
    if type(data_path) != str:
        data_path = data_path.name

    image = nib.load(data_path)
    data = image.get_fdata()

    data = np.rot90(data, k=1, axes=(0, 1))

    # binarize the prediction
    data[data > 0] = 1
    data = data.astype("uint8")

    print(data.shape)
    return [data[..., i] for i in range(data.shape[-1])]
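Both loaders return one 2D array per axial slice, which is what the GUI pairs up for its `AnnotatedImage` overlays. A minimal sketch of that pairing, mirroring `combine_ct_and_seg` in `gui.py` (file names are placeholders):

```python
# hypothetical pairing of CT slices with their segmentation overlays
from src.utils import load_ct_to_numpy, load_pred_volume_to_numpy

images = load_ct_to_numpy("./test_thorax_CT.nii.gz")
preds = load_pred_volume_to_numpy("./prediction.nii.gz")

# each element matches the (image, [(mask, label)]) format expected by gr.AnnotatedImage
pairs = [(img, [(pred, "tumor")]) for img, pred in zip(images, preds)]
```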
setup.cfg
ADDED
@@ -0,0 +1,14 @@
[metadata]
description-file = README.md

[isort]
force_single_line=True
known_first_party=aeropath
line_length=80
profile=black

[flake8]
# imported but unused in __init__.py, that's ok.
per-file-ignores=*__init__.py:F401
ignore=E203,W503,W605,F632,E266,E731,E712,E741
max-line-length=80