
Commit

Initial commit
qinliuliuqin committed Sep 29, 2022
0 parents commit 3e01ad5
Showing 355 changed files with 61,697 additions and 0 deletions.
132 changes: 132 additions & 0 deletions .gitignore
@@ -0,0 +1,132 @@
# Models
weights/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
40 changes: 40 additions & 0 deletions Dockerfile
@@ -0,0 +1,40 @@
FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    git \
    curl \
    libglib2.0-0 \
    software-properties-common \
    python3.6-dev \
    python3-pip \
    python3-tk \
    firefox \
    libcanberra-gtk-module \
    nano

WORKDIR /tmp

RUN pip3 install --upgrade pip
RUN pip3 install setuptools
RUN pip3 install matplotlib numpy pandas scipy tqdm pyyaml easydict scikit-image bridson Pillow ninja
RUN pip3 install imgaug mxboard graphviz
RUN pip3 install albumentations --no-deps
RUN pip3 install opencv-python-headless
RUN pip3 install Cython
RUN pip3 install torch
RUN pip3 install torchvision
RUN pip3 install scikit-learn
RUN pip3 install tensorboard

RUN mkdir /work
WORKDIR /work
RUN chmod -R 777 /work && chmod -R 777 /root

ENV TINI_VERSION v0.18.0
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /usr/bin/tini
RUN chmod +x /usr/bin/tini
ENTRYPOINT [ "/usr/bin/tini", "--" ]
CMD [ "/bin/bash" ]
21 changes: 21 additions & 0 deletions LICENSE
@@ -0,0 +1,21 @@
The MIT License

Copyright (c) 2021 Samsung Electronics Co., Ltd.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
77 changes: 77 additions & 0 deletions README.md
@@ -0,0 +1,77 @@
## [iSegFormer: Interactive Image Segmentation via Transformers with Application to 3D Knee MR Images](https://arxiv.org/abs/2112.11325)
<p align="center">
<a href="https://arxiv.org/abs/2112.11325">
<img src="https://img.shields.io/badge/arXiv-2102.06583-b31b1b"/>
</a>
<a href="https://colab.research.google.com/github/qinliuliuqin/iSegFormer/blob/main/notebooks/colab_test_isegformer.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
<a href="https://opensource.org/licenses/MIT">
<img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="The MIT License"/>
</a>
</p>

<p align="center">
<img src="./assets/img/iSegFormer.png" alt="drawing", width="650"/>
</p>

## Installation
If you want to test our models remotely, run this [colab notebook](https://colab.research.google.com/github/qinliuliuqin/iSegFormer/blob/main/notebooks/colab_test_isegformer.ipynb). Otherwise, download our codebase and install it locally.

This framework is built with Python 3.9 and relies on PyTorch 1.4.0+. The following command installs all necessary packages:

```.bash
pip3 install -r requirements.txt
```
If you want to run training or testing, you must configure the paths to the datasets in [config.yml](config.yml).
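
These paths are read at runtime with `isegm.utils.exp.load_config_file`, the same call `demo.py` uses. Below is a minimal, illustrative sketch (not part of the release scripts) of inspecting them, assuming the repository root is the working directory:

```python
# Minimal sketch: load config.yml the way demo.py does and print two of the paths it defines.
from isegm.utils import exp

cfg = exp.load_config_file("config.yml", return_edict=True)
print(cfg.INTERACTIVE_MODELS_PATH)  # directory holding interactive model checkpoints
print(cfg.OAIZIB_PATH)              # OAI-ZIB evaluation dataset
```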

## Demo with GUI
```
$ ./run_demo.sh
```

## Evaluation
First, download the [datasets and pretrained weights](https://github.com/qinliuliuqin/iSegFormer/releases), then run the following command for evaluation:
```
python scripts/evaluate_model.py NoBRS \
--gpu 0 \
--checkpoint=./weights/imagenet21k_pretrain_cocolvis_finetune_segformerb5_epoch_54.pth \
--dataset=OAIZIB
```
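
Alternatively, the checkpoint can be loaded programmatically with the helpers that `demo.py` calls. The sketch below is illustrative only; it assumes `INTERACTIVE_MODELS_PATH` in `config.yml` points at the directory containing the downloaded weights.

```python
# Hedged sketch: resolve and load the released checkpoint with the same utilities demo.py uses.
# The checkpoint name comes from the evaluation command above; the file extension can be omitted.
import torch

from isegm.utils import exp
from isegm.inference import utils

cfg = exp.load_config_file("config.yml", return_edict=True)
checkpoint_path = utils.find_checkpoint(
    cfg.INTERACTIVE_MODELS_PATH,
    "imagenet21k_pretrain_cocolvis_finetune_segformerb5_epoch_54",
)
# cpu_dist_maps=True mirrors the setting used in demo.py.
model = utils.load_is_model(checkpoint_path, torch.device("cuda:0"), cpu_dist_maps=True)
```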

## Training
Train the Swin-B model on the OAIZIB dataset:
```
python train.py models/iter_mask/swinformer_large_oaizib_itermask.py \
--batch-size=22 \
--gpu=0
```

## Model Weights
We release two models, Swin-B and HRNet32, which can be downloaded from the [release page](https://github.com/qinliuliuqin/iSegFormer/releases).

<!-- ## Datasets
[OAI-ZIB-test](https://github.com/qinliuliuqin/iSegFormer/releases/download/v0.1/OAI-ZIB-test.zip) \
[BraTS20](https://drive.google.com/drive/folders/12iSwrI2M98pV7s_5hOrp9r-PELlQzWOq?usp=sharing) \
[ssTEM](https://github.com/unidesigner/groundtruth-drosophila-vnc/tree/master/stack1/raw)
-->
<!-- ## Video Demos
The following two demos are out of date.
[Demo 1: OAI Knee](https://drive.google.com/file/d/1HyQsWYA6aG7I5C57b8ZTczNrW9OR6ZDS/view?usp=sharing) \
[Demo 2: ssTEM](https://drive.google.com/file/d/1dZL91P2rDEQqrlHQi2XaTlnY1rmWezNF/view?usp=sharing)
-->

## License
The code is released under the MIT License, a short, permissive software license: you may do almost anything you want with the code as long as you include the original copyright and license notice in any copy of the software or source.

## Citation
```
@article{liu2021isegformer,
  title={iSegFormer: Interactive Image Segmentation via Transformers with Application to 3D Knee MR Images},
  author={Liu, Qin and Xu, Zhenlin and Jiao, Yining and Niethammer, Marc},
  journal={arXiv preprint arXiv:2112.11325},
  year={2021}
}
```
Empty file added __init__.py
Empty file.
Binary file added assets/img/demo_gui.jpg
Binary file added assets/img/iSegFormer.png
Binary file added assets/img/miou_berkeley.png
Binary file added assets/img/modifying_external_mask.jpg
Binary file added assets/img/teaser.gif
Binary file added assets/sbd_samples_weights.pkl
Binary file added assets/test_imgs/apples_bowl.jpg
Binary file added assets/test_imgs/parrots.jpg
Binary file added assets/test_imgs/sheep.jpg
47 changes: 47 additions & 0 deletions config.yml
@@ -0,0 +1,47 @@
# INTERACTIVE_MODELS_PATH: "./weights"
# INTERACTIVE_MODELS_PATH: "/playpen-raid/qinliu/models/model_1012_2021/iter_mask/pascal_hrnet18/001/checkpoints"
# INTERACTIVE_MODELS_PATH: "/playpen-raid/qinliu/models/model_1015_2021/iter_mask/pascal_segformerb5/005_pretrain_b2/checkpoints"
# INTERACTIVE_MODELS_PATH: "/playpen-raid/qinliu/models/model_0207_2022/iter_mask/cocolvis_swinformer_base/000_cocolvis_swin_base/checkpoints"
# INTERACTIVE_MODELS_PATH: /playpen-raid2/qinliu/models/model_0907_2022/iter_mask/cocolvis_plainvit_base/000/checkpoints
INTERACTIVE_MODELS_PATH: "/playpen-raid2/qinliu/projects/iSegFormer/weights"
EXPS_PATH: "/playpen-raid2/qinliu/models/model_0928_2022"

# Evaluation datasets
GRABCUT_PATH: "/playpen-raid2/qinliu/data/GrabCut"
BERKELEY_PATH: "/playpen-raid/qinliu/data/Berkeley"
DAVIS_PATH: "/playpen-raid/qinliu/data/DAVIS"
COCO_MVAL_PATH: "/playpen-raid/qinliu/data/COCO_MVal"
BraTS_PATH: "/playpen-raid/qinliu/data/BraTS20"
ssTEM_PATH: "/playpen-raid/qinliu/data/ssTEM"
OAIZIB_PATH: "/playpen-raid2/qinliu/data/OAI-ZIB"
OAI_PATH: "/playpen-raid2/qinliu/data/OAI"

# Train datasets
SBD_PATH: "/playpen-raid/qinliu/data/SBD/dataset"
COCO_PATH: "/playpen-raid/qinliu/data/COCO_2017"
LVIS_v1_PATH: "/playpen-raid/qinliu/data/COCO_2017"
OPENIMAGES_PATH: "./datasets/OpenImages"
PASCALVOC_PATH: "/playpen-raid/qinliu/data/PascalVOC"
ADE20K_PATH: "./datasets/ADE20K"

# You can download the weights for HRNet from the repository:
# https://github.com/HRNet/HRNet-Image-Classification
IMAGENET_PRETRAINED_MODELS:
  HRNETV2_W18: "./weights/pretrained/hrnetv2_w18_imagenet_pretrained.pth"
  HRNETV2_W32: "./weights/pretrained/hrnetv2_w32_imagenet_pretrained.pth"
  HRNETV2_W32_SOTA: "./weights/coco_lvis_h32_itermask.pth"
  HRNETV2_W40: "./weights/pretrained/hrnetv2_w40_imagenet_pretrained.pth"
  HRNETV2_W48: "./weights/pretrained/hrnetv2_w48_imagenet_pretrained.pth"
  HRNETV2_W64: "./weights/pretrained/hrnetv2_w64_imagenet_pretrained.pth"
  MIT_B5: "./weights/pretrained/mit_b5_converted.pth"
  MIT_B4: "./weights/pretrained/mit_b4_converted.pth"
  MIT_B3: "./weights/pretrained/mit_b3_converted.pth"
  MIT_B2: "./weights/pretrained/mit_b2_converted.pth"
  MIT_B1: "./weights/pretrained/mit_b1_converted.pth"
  MIT_B0: "./weights/pretrained/mit_b0_converted.pth"
  HRF_BASE: "./weights/pretrained/hrt_base.pth"
  SWIN_BASE: "/playpen-raid2/qinliu/projects/iSegFormer/weights/pretrained/swin_base_patch4_window12_384_22k.pth"
  SWIN_LARGE: "/playpen-raid2/qinliu/projects/iSegFormer/weights/pretrained/swin_large_patch4_window12_384_22k.pth"
  MAE_BASE: "/playpen-raid2/qinliu/projects/microViT/pretrain/mae_pretrain_vit_base.pth"
  MAE_LARGE: "/playpen-raid2/qinliu/projects/microViT/pretrain/mae_pretrain_vit_large.pth"
  MAE_HUGE: "/playpen-raid2/qinliu/projects/microViT/pretrain/mae_pretrain_vit_huge.pth"
59 changes: 59 additions & 0 deletions demo.py
@@ -0,0 +1,59 @@
import matplotlib
matplotlib.use('Agg')

import argparse
import tkinter as tk

import torch

from isegm.utils import exp
from isegm.inference import utils
from interactive_demo.app import InteractiveDemoApp

def main():
    args, cfg = parse_args()

    torch.backends.cudnn.deterministic = True
    # Resolve the checkpoint (relative to cfg.INTERACTIVE_MODELS_PATH or absolute) and load the model.
    checkpoint_path = utils.find_checkpoint(cfg.INTERACTIVE_MODELS_PATH, args.checkpoint)
    model = utils.load_is_model(checkpoint_path, args.device, cpu_dist_maps=True)

    # Build the Tkinter GUI and hand the loaded model to the interactive demo app.
    root = tk.Tk()
    root.minsize(960, 480)
    app = InteractiveDemoApp(root, args, model)
    root.deiconify()
    app.mainloop()


def parse_args():
    parser = argparse.ArgumentParser()

    parser.add_argument('--checkpoint', type=str, required=True,
                        help='The path to the checkpoint. '
                             'This can be a relative path (relative to cfg.INTERACTIVE_MODELS_PATH) '
                             'or an absolute path. The file extension can be omitted.')

    parser.add_argument('--gpu', type=int, default=0,
                        help='Id of GPU to use.')

    parser.add_argument('--cpu', action='store_true', default=False,
                        help='Use only CPU for inference.')

    parser.add_argument('--limit-longest-size', type=int, default=800,
                        help='If the largest side of an image exceeds this value, '
                             'it is resized so that its largest side is equal to this value.')

    parser.add_argument('--cfg', type=str, default="config.yml",
                        help='The path to the config file.')

    args = parser.parse_args()
    # Select the inference device: CPU if requested, otherwise the chosen GPU.
    if args.cpu:
        args.device = torch.device('cpu')
    else:
        args.device = torch.device(f'cuda:{args.gpu}')
    cfg = exp.load_config_file(args.cfg, return_edict=True)

    return args, cfg


if __name__ == '__main__':
    main()
Binary file added demo/00.tiff
Binary file added demo/153093.jpg
