Merge pull request #14 from andreped/dev
Added argparse support + linting CI + refactored
andreped committed Jun 7, 2023
2 parents 808d93e + 188c089 commit 856ed7f
Showing 9 changed files with 259 additions and 76 deletions.
28 changes: 28 additions & 0 deletions .github/workflows/linting.yml
@@ -0,0 +1,28 @@
name: Linting

on:
  push:
    branches:
      - '*'
  pull_request:
    branches:
      - '*'
  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v1
      - name: Set up Python 3.7
        uses: actions/setup-python@v2
        with:
          python-version: 3.7

      - name: Install lint dependencies
        run: |
          pip install wheel setuptools
          pip install black==22.3.0 isort==5.10.1 flake8==4.0.1
      - name: Lint the code
        run: sh shell/lint.sh
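
The lint step above delegates to `shell/lint.sh`, whose contents are not shown in this view. Purely as an illustration of what that step does (not the repository's actual script), the same checks could be driven from Python using the tools pinned by the workflow:

```
# Hypothetical stand-in for the lint step; the real shell/lint.sh is not part of this view.
# Runs the same tools the workflow installs, in check-only mode: isort, black, flake8.
import subprocess
import sys


def check(cmd):
    # print and run one linter, returning its exit code
    print("Running:", " ".join(cmd))
    return subprocess.call(cmd)


if __name__ == "__main__":
    status = 0
    status |= check([sys.executable, "-m", "isort", "--check-only", "."])
    status |= check([sys.executable, "-m", "black", "--check", "."])
    status |= check([sys.executable, "-m", "flake8", "."])
    sys.exit(status)
```

Either way, the job fails if any of the three tools reports an issue, since each returns a non-zero exit code on violations.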
26 changes: 25 additions & 1 deletion README.md
@@ -10,7 +10,7 @@ license: mit
app_file: app.py
---

<div align="center">
<h1 align="center">neukit</h1>
<h3 align="center">Automatic brain extraction and preoperative tumor segmentation from MRI</h3>

@@ -36,6 +36,8 @@ To access the live demo, click on the `Hugging Face` badge above. Below is a sna

## Development

### Docker

Alternatively, you can deploy the software locally. Note that this is only relevant for development purposes. Simply dockerize the app and run it:

@@ -45,6 +47,28 @@
```
docker run -it -p 7860:7860 neukit
```

Then open `http://127.0.0.1:7860` in your favourite internet browser to view the demo.

### Python

It is also possible to run the app locally without Docker. Just set up a virtual environment and run the app.
Note that the current working directory needs to be adjusted based on where `neukit` is located on disk.

```
git clone https://github.com/andreped/neukit.git
cd neukit/
virtualenv -ppython3 venv --clear
source venv/bin/activate
pip install -r requirements.txt
python app.py --cwd ./
```

## Troubleshooting

Note that because `share=True` is enabled by default when launching the app,
internet access is required to launch it. This can be disabled by passing
the argument `--share 0`.
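
The sketch below illustrates how the integer `--share` flag reaches gradio's `launch()`. It is a simplified stand-in, not the full neukit app, and mirrors the `app.py` and `gui.py` changes further down:

```
# Simplified sketch (not the full neukit app) of how --share reaches gradio's launch().
from argparse import ArgumentParser

import gradio as gr

parser = ArgumentParser()
parser.add_argument(
    "--share",
    type=int,
    default=1,
    help="1 -> create a public link (requires internet access), 0 -> local only.",
)
args = parser.parse_args()

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")

# launch() expects a boolean; the 0/1 integer from argparse is passed through as truthy/falsy
demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=bool(args.share))
```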

## Citation

If you found the tool useful in your research, please cite the corresponding software paper:
33 changes: 29 additions & 4 deletions app.py
@@ -1,14 +1,39 @@
import os
from argparse import ArgumentParser

from neukit.gui import WebUI


def main():
    print("Launching demo...")
    parser = ArgumentParser()
    parser.add_argument(
        "--cwd",
        type=str,
        default="/home/user/app/",
        help="Set current working directory (path to app.py).",
    )
    parser.add_argument(
        "--share",
        type=int,
        default=1,
        help="Whether to enable the app to be accessible online"
        "-> setups a public link which requires internet access.",
    )
    args = parser.parse_args()

    # cwd = "/Users/andreped/workspace/neukit/" # local testing -> macOS
    cwd = "/home/user/app/" # production -> docker
    print("Current working directory:", args.cwd)

    if not os.path.exists(args.cwd):
        raise ValueError("Chosen 'cwd' is not a valid path!")
    if args.share not in [0, 1]:
        raise ValueError(
            "The 'share' argument can only be set to 0 or 1, but was:",
            args.share,
        )

    # initialize and run app
    app = WebUI(cwd=cwd)
    print("Launching demo...")
    app = WebUI(cwd=args.cwd, share=args.share)
    app.run()


114 changes: 76 additions & 38 deletions neukit/gui.py
@@ -1,10 +1,20 @@
import os

import gradio as gr
from .utils import load_ct_to_numpy, load_pred_volume_to_numpy, nifti_to_glb

from .inference import run_model
from .utils import load_ct_to_numpy
from .utils import load_pred_volume_to_numpy
from .utils import nifti_to_glb


class WebUI:
    def __init__(self, model_name:str = None, cwd:str = "/home/user/app/"):
    def __init__(
        self,
        model_name: str = None,
        cwd: str = "/home/user/app/",
        share: int = 1,
    ):
        # global states
        self.images = []
        self.pred_images = []
@@ -14,8 +24,9 @@ def __init__(self, model_name:str = None, cwd:str = "/home/user/app/"):

        self.model_name = model_name
        self.cwd = cwd
        self.share = share

        self.class_name = "meningioma" # default - but can be updated based on which task is chosen from dropdown
        self.class_name = "meningioma"  # default
        self.class_names = {
            "meningioma": "MRI_Meningioma",
            "low-grade": "MRI_LGGlioma",
@@ -33,41 +44,55 @@ def __init__(self, model_name:str = None, cwd:str = "/home/user/app/"):
        }

        # define widgets not to be rendered immediately, but later on
        self.slider = gr.Slider(1, self.nb_slider_items, value=1, step=1, label="Which 2D slice to show")
        self.slider = gr.Slider(
            1,
            self.nb_slider_items,
            value=1,
            step=1,
            label="Which 2D slice to show",
        )
        self.volume_renderer = gr.Model3D(
            clear_color=[0.0, 0.0, 0.0, 0.0],
            label="3D Model",
            visible=True,
            elem_id="model-3d",
        ).style(height=512)

    def set_class_name(self, value):
        print("Changed task to:", value)
        self.class_name = value

    def combine_ct_and_seg(self, img, pred):
        return (img, [(pred, self.class_name)])

    def upload_file(self, file):
        return file.name
    def load_mesh(self, mesh_file_name):

    def process(self, mesh_file_name):
        path = mesh_file_name.name
        run_model(path, model_path=self.cwd + "resources/models/", task=self.class_names[self.class_name], name=self.result_names[self.class_name])
        run_model(
            path,
            model_path=os.path.join(self.cwd, "resources/models/"),
            task=self.class_names[self.class_name],
            name=self.result_names[self.class_name],
        )
        nifti_to_glb("prediction.nii.gz")

        self.images = load_ct_to_numpy(path)
        self.pred_images = load_pred_volume_to_numpy("./prediction.nii.gz")
        return "./prediction.obj"

    def get_img_pred_pair(self, k):
        k = int(k) - 1
        out = [gr.AnnotatedImage.update(visible=False)] * self.nb_slider_items
        out[k] = gr.AnnotatedImage.update(self.combine_ct_and_seg(self.images[k], self.pred_images[k]), visible=True)
        out[k] = gr.AnnotatedImage.update(
            self.combine_ct_and_seg(self.images[k], self.pred_images[k]),
            visible=True,
        )
        return out

    def run(self):
        css="""
        css = """
        #model-3d {
        height: 512px;
        }
@@ -80,18 +105,15 @@ def run(self):
        }
        """
        with gr.Blocks(css=css) as demo:

            with gr.Row():

                file_output = gr.File(file_count="single", elem_id="upload") # elem_id="upload"
                file_output = gr.File(file_count="single", elem_id="upload")
                file_output.upload(self.upload_file, file_output, file_output)

                # with gr.Column():

                model_selector = gr.Dropdown(
                    list(self.class_names.keys()),
                    label="Task",
                    info="Which task to perform - one model for each brain tumor type and brain extraction",
                    info="Which task to perform - one model for"
                    "each brain tumor type and brain extraction",
                    multiselect=False,
                    size="sm",
                )
@@ -101,39 +123,55 @@ def run(self):
                    outputs=None,
                )

                run_btn = gr.Button("Run analysis").style(full_width=False, size="lg")
                run_btn = gr.Button("Run analysis").style(
                    full_width=False, size="lg"
                )
                run_btn.click(
                    fn=lambda x: self.load_mesh(x),
                    fn=lambda x: self.process(x),
                    inputs=file_output,
                    outputs=self.volume_renderer,
                )

            with gr.Row():
                gr.Examples(
                    examples=[self.cwd + "RegLib_C01_1.nii", self.cwd + "RegLib_C01_2.nii"],
                    examples=[
                        os.path.join(self.cwd, "RegLib_C01_1.nii"),
                        os.path.join(self.cwd, "RegLib_C01_2.nii"),
                    ],
                    inputs=file_output,
                    outputs=file_output,
                    fn=self.upload_file,
                    cache_examples=True,
                )

            with gr.Row():
                with gr.Box():
                    image_boxes = []
                    for i in range(self.nb_slider_items):
                        visibility = True if i == 1 else False
                        t = gr.AnnotatedImage(visible=visibility, elem_id="model-2d")\
                            .style(color_map={self.class_name: "#ffae00"}, height=512, width=512)
                        image_boxes.append(t)

                    self.slider.change(self.get_img_pred_pair, self.slider, image_boxes)

                with gr.Column():
                    image_boxes = []
                    for i in range(self.nb_slider_items):
                        visibility = True if i == 1 else False
                        t = gr.AnnotatedImage(
                            visible=visibility, elem_id="model-2d"
                        ).style(
                            color_map={self.class_name: "#ffae00"},
                            height=512,
                            width=512,
                        )
                        image_boxes.append(t)

                    self.slider.input(
                        self.get_img_pred_pair, self.slider, image_boxes
                    )

                    self.slider.render()

                with gr.Box():
                    self.volume_renderer.render()

            with gr.Row():
                self.slider.render()

        # sharing app publicly -> share=True: https://gradio.app/sharing-your-app/
        # inference times > 60 seconds -> need queue(): https://github.com/tloen/alpaca-lora/issues/60#issuecomment-1510006062
        demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=True)
        # sharing app publicly -> share=True:
        # https://gradio.app/sharing-your-app/
        # inference times > 60 seconds -> need queue():
        # https://github.com/tloen/alpaca-lora/issues/60#issuecomment-1510006062
        demo.queue().launch(
            server_name="0.0.0.0", server_port=7860, share=self.share
        )
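
The `get_img_pred_pair` wiring above fans a single slider value out to one `AnnotatedImage` per slice, revealing only the selected one. Below is a condensed, standalone sketch of that pattern, assuming the same gradio 3.x `update()` API used in this diff; names such as `NB_ITEMS` are illustrative only:

```
# Condensed sketch of the slider-to-slice pattern from WebUI (gradio 3.x style, as in this diff).
import gradio as gr

NB_ITEMS = 100  # mirrors self.nb_slider_items


def get_img_pred_pair(k, images, pred_images, class_name="meningioma"):
    # hide every AnnotatedImage, then reveal only the slice selected by the slider
    k = int(k) - 1
    out = [gr.AnnotatedImage.update(visible=False)] * NB_ITEMS
    out[k] = gr.AnnotatedImage.update(
        value=(images[k], [(pred_images[k], class_name)]),
        visible=True,
    )
    return out
```

Returning one update per component keeps only the chosen 2D slice visible, which is why the slider callback takes the full list of image boxes as its outputs.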
