Skip to content

Commit

Permalink
⚡ Add Mapillary datasets
Browse files Browse the repository at this point in the history
  • Loading branch information
praeclarumjj3 committed Jan 25, 2023
1 parent 5a4a4fa commit d29a1a3
Show file tree
Hide file tree
Showing 9 changed files with 1,199 additions and 0 deletions.
63 changes: 63 additions & 0 deletions configs/mapillary/Base-Mapillary-UnifiedSegmentation.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
MODEL:
  BACKBONE:
    FREEZE_AT: 0
    NAME: "build_resnet_backbone"
  WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl"
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.120, 57.375]
  RESNETS:
    DEPTH: 50
    STEM_TYPE: "basic"  # not used
    STEM_OUT_CHANNELS: 64
    STRIDE_IN_1X1: False
    OUT_FEATURES: ["res2", "res3", "res4", "res5"]
    NORM: "SyncBN"  # use SyncBN for the Mapillary Vistas dataset
    RES5_MULTI_GRID: [1, 1, 1]  # not used
DATASETS:
  TRAIN: ("mapillary_vistas_panoptic_train",)
  TEST_PANOPTIC: ("mapillary_vistas_panoptic_val",)
  TEST_INSTANCE: ("mapillary_vistas_panoptic_val",)
  TEST_SEMANTIC: ("mapillary_vistas_sem_seg_val",)
SOLVER:
  IMS_PER_BATCH: 16
  BASE_LR: 0.0001
  MAX_ITER: 300000
  WARMUP_FACTOR: 1.0
  WARMUP_ITERS: 0
  WEIGHT_DECAY: 0.05
  OPTIMIZER: "ADAMW"
  LR_SCHEDULER_NAME: "WarmupPolyLR"
  BACKBONE_MULTIPLIER: 0.1
  CLIP_GRADIENTS:
    ENABLED: True
    CLIP_TYPE: "full_model"
    CLIP_VALUE: 0.01
    NORM_TYPE: 2.0
  AMP:
    ENABLED: True
INPUT:
  # Train scales: 0.5x-2.0x of the 2048 base size, sampled per image.
  MIN_SIZE_TRAIN: !!python/object/apply:eval ["[int(x * 0.1 * 2048) for x in range(5, 21)]"]
  MIN_SIZE_TRAIN_SAMPLING: "choice"
  MIN_SIZE_TEST: 2048
  MAX_SIZE_TRAIN: 8192
  MAX_SIZE_TEST: 2048
  CROP:
    ENABLED: True
    TYPE: "absolute"
    SIZE: (1024, 1024)
    SINGLE_CATEGORY_MAX_AREA: 1.0
  COLOR_AUG_SSD: True
  SIZE_DIVISIBILITY: 1024  # used in dataset mapper
  FORMAT: "RGB"
  DATASET_MAPPER_NAME: "oneformer_unified"
  MAX_SEQ_LEN: 77
  TASK_SEQ_LEN: 77
  # Per-image task sampling probabilities; remainder (~0.01) falls to panoptic.
  TASK_PROB:
    SEMANTIC: 0.33
    INSTANCE: 0.66
TEST:
  EVAL_PERIOD: 30000
DATALOADER:
  FILTER_EMPTY_ANNOTATIONS: True
  NUM_WORKERS: 10
VERSION: 2
18 changes: 18 additions & 0 deletions configs/mapillary/convnext/oneformer_convnext_large_bs16_300k.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# ConvNeXt-Large backbone variant; inherits training schedule from the R50 base.
_BASE_: ../oneformer_R50_bs16_300k.yaml
MODEL:
  BACKBONE:
    NAME: "D2ConvNeXt"
  CONVNEXT:
    IN_CHANNELS: 3
    DEPTHS: [3, 3, 27, 3]
    DIMS: [192, 384, 768, 1536]
    DROP_PATH_RATE: 0.4
    LSIT: 1.0  # NOTE(review): presumably layer-scale init value — key name matches project config schema; verify
    OUT_INDICES: [0, 1, 2, 3]
  WEIGHTS: "convnext_large_22k_1k_384.pkl"
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.120, 57.375]
  ONE_FORMER:
    NUM_OBJECT_QUERIES: 250
TEST:
  DETECTIONS_PER_IMAGE: 250
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# ConvNeXt-XLarge backbone variant; inherits training schedule from the R50 base.
_BASE_: ../oneformer_R50_bs16_300k.yaml
MODEL:
  BACKBONE:
    NAME: "D2ConvNeXt"
  CONVNEXT:
    IN_CHANNELS: 3
    DEPTHS: [3, 3, 27, 3]
    DIMS: [256, 512, 1024, 2048]
    DROP_PATH_RATE: 0.4
    LSIT: 1.0  # NOTE(review): presumably layer-scale init value — key name matches project config schema; verify
    OUT_INDICES: [0, 1, 2, 3]
  WEIGHTS: "convnext_xlarge_22k_1k_384_ema.pkl"
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.120, 57.375]
  ONE_FORMER:
    NUM_OBJECT_QUERIES: 250
TEST:
  DETECTIONS_PER_IMAGE: 250
22 changes: 22 additions & 0 deletions configs/mapillary/dinat/oneformer_dinat_large_bs16_300k.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# DiNAT-Large backbone variant; inherits training schedule from the R50 base.
_BASE_: ../oneformer_R50_bs16_300k.yaml
MODEL:
  BACKBONE:
    NAME: "D2DiNAT"
  DiNAT:
    EMBED_DIM: 192
    MLP_RATIO: 2.0
    DEPTHS: [3, 4, 18, 5]
    NUM_HEADS: [6, 12, 24, 48]
    KERNEL_SIZE: 11
    DROP_PATH_RATE: 0.3
    # Per-layer dilation rates for each of the four stages.
    DILATIONS: [[1, 20, 1], [1, 5, 1, 10], [1, 2, 1, 3, 1, 4, 1, 5, 1, 2, 1, 3, 1, 4, 1, 5, 1, 5], [1, 2, 1, 2, 1]]
  WEIGHTS: "dinat_large_in22k_in1k_384_11x11.pkl"
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.120, 57.375]
  ONE_FORMER:
    NUM_OBJECT_QUERIES: 250
SOLVER:
  AMP:
    ENABLED: False  # mixed precision disabled for this backbone
TEST:
  DETECTIONS_PER_IMAGE: 250
59 changes: 59 additions & 0 deletions configs/mapillary/oneformer_R50_bs16_300k.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
_BASE_: Base-Mapillary-UnifiedSegmentation.yaml
MODEL:
  META_ARCHITECTURE: "OneFormer"
  SEM_SEG_HEAD:
    NAME: "OneFormerHead"
    IGNORE_VALUE: 65
    NUM_CLASSES: 65  # Mapillary Vistas: 65 classes
    LOSS_WEIGHT: 1.0
    CONVS_DIM: 256
    MASK_DIM: 256
    NORM: "GN"
    # pixel decoder
    PIXEL_DECODER_NAME: "MSDeformAttnPixelDecoder"
    IN_FEATURES: ["res2", "res3", "res4", "res5"]
    DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES: ["res3", "res4", "res5"]
    COMMON_STRIDE: 4
    TRANSFORMER_ENC_LAYERS: 6
  ONE_FORMER:
    TRANSFORMER_DECODER_NAME: "ContrastiveMultiScaleMaskedTransformerDecoder"
    TRANSFORMER_IN_FEATURE: "multi_scale_pixel_decoder"
    DEEP_SUPERVISION: True
    NO_OBJECT_WEIGHT: 0.1
    CLASS_WEIGHT: 2.0
    MASK_WEIGHT: 5.0
    DICE_WEIGHT: 5.0
    CONTRASTIVE_WEIGHT: 0.5
    CONTRASTIVE_TEMPERATURE: 0.07
    HIDDEN_DIM: 256
    NUM_OBJECT_QUERIES: 150
    USE_TASK_NORM: True
    NHEADS: 8
    DROPOUT: 0.1
    DIM_FEEDFORWARD: 2048
    # ENC_LAYERS appeared twice in the original with the same value (0);
    # duplicate mapping keys are invalid YAML — kept a single entry.
    ENC_LAYERS: 0
    PRE_NORM: False
    ENFORCE_INPUT_PROJ: False
    SIZE_DIVISIBILITY: 32
    CLASS_DEC_LAYERS: 2
    DEC_LAYERS: 10  # 9 decoder layers, add one for the loss on learnable query
    TRAIN_NUM_POINTS: 12544
    OVERSAMPLE_RATIO: 3.0
    IMPORTANCE_SAMPLE_RATIO: 0.75
  TEXT_ENCODER:
    WIDTH: 256
    CONTEXT_LENGTH: 77
    NUM_LAYERS: 6
    VOCAB_SIZE: 49408
    PROJ_NUM_LAYERS: 2
    N_CTX: 16
  TEST:
    SEMANTIC_ON: True
    INSTANCE_ON: True
    PANOPTIC_ON: True
    OVERLAP_THRESHOLD: 0.8
    OBJECT_MASK_THRESHOLD: 0.8
    TASK: "panoptic"
TEST:
  DETECTIONS_PER_IMAGE: 150
2 changes: 2 additions & 0 deletions oneformer/data/datasets/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,6 @@
register_coco_panoptic_annos_semseg,
register_ade20k_instance,
register_coco_panoptic2instance,
register_mapillary_vistas,
register_mapillary_vistas_panoptic,
)
Loading

0 comments on commit d29a1a3

Please sign in to comment.