Tps #15

Merged: 15 commits on Jul 2, 2023

Changes from 1 commit
update synthia pipeline
vivekvjk committed Jun 26, 2023
commit 3be1bb54eef8d08b0817139e10a603d9a78e17de
63 changes: 21 additions & 42 deletions configs/_base_/datasets/uda_synthiaSeq_CSSeq.py
@@ -1,16 +1,25 @@
# dataset settings
FRAME_OFFSET = -1
dataset_type = 'SynthiaSeqDataset'
synthia_data_root = '/srv/share4/datasets/SynthiaSeq/SYNTHIA-SEQS-04-DAWN'
cs_data_root = '/coc/testnvme/datasets/VideoDA/cityscapes-seq'
cs_train_flow_dir = '/srv/share4/datasets/cityscapes-seq_Flow/flow/forward/train'

synthia_train_flow_dir = '/srv/share4/datasets/SynthiaSeq_Flow/frame_dist_1/forward/train/RGB/Stereo_Left/Omni_F'
cs_val_flow_dir = '/srv/share4/datasets/cityscapes-seq_Flow/flow/forward/val'

#backward flow
cs_train_flow_dir = "/coc/testnvme/datasets/VideoDA/cityscapes-seq_Flow/flow_test_bed/frame_dist_1/backward/train"
cs_val_flow_dir = "/coc/testnvme/datasets/VideoDA/cityscapes-seq_Flow/flow_test_bed/frame_dist_1/backward/val"

#forward flow
cs_train_flow_dir = "/coc/testnvme/datasets/VideoDA/cityscapes-seq_Flow/flow/forward/train"
cs_val_flow_dir = "/coc/testnvme/datasets/VideoDA/cityscapes-seq_Flow/flow/forward/val"

img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

crop_size = (1024, 1024)
ignore_index = [5, 3, 16, 12, 201, 255] #need to edit


synthia_train_pipeline = {
"im_load_pipeline": [
@@ -21,10 +30,10 @@
dict(type='LoadImageFromFile'),
],
"load_flow_pipeline": [
dict(type='LoadFlowFromFileStub'),
dict(type='LoadFlowFromFile'),
],
"shared_pipeline": [
dict(type='Resize', img_scale=(2560, 1520)),
dict(type='Resize', img_scale=(2560, 1520)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
],
@@ -43,18 +52,6 @@
]
}

# cityscapes_train_pipeline = [
# dict(type='LoadImageFromFile'),
# dict(type='LoadAnnotations'),
# dict(type='Resize', img_scale=(2048, 1024)),
# dict(type='RandomCrop', crop_size=crop_size),
# dict(type='RandomFlip', prob=0.5),
# # dict(type='PhotoMetricDistortion'), # is applied later in dacs.py
# dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
# dict(type='DefaultFormatBundle'),
# dict(type='Collect', keys=['img', 'gt_semantic_seg']),
# ]

cityscapes_train_pipeline = {
"im_load_pipeline": [
@@ -87,24 +84,6 @@
]
}

# test_pipeline = [
# dict(type='LoadImageFromFile'),
# dict(
# type='MultiScaleFlipAug',
# img_scale=(2048, 1024),
# # MultiScaleFlipAug is disabled by not providing img_ratios and
# # setting flip=False
# # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
# flip=False,
# transforms=[
# dict(type='Resize', keep_ratio=True),
# dict(type='RandomFlip'),
# dict(type='Normalize', **img_norm_cfg),
# dict(type='ImageToTensor', keys=['img']),
# dict(type='Collect', keys=['img']),
# ])
# ]

test_pipeline = {
"im_load_pipeline": [
dict(type='LoadImageFromFile'),
@@ -136,10 +115,7 @@
]
}


data = dict(
samples_per_gpu=2,
workers_per_gpu=4,
train=dict(
type='UDADataset',
source=dict(
@@ -159,8 +135,9 @@
ann_dir='gtFine/train',
split='splits/train.txt',
pipeline=cityscapes_train_pipeline,
frame_offset=1,
frame_offset=FRAME_OFFSET,
flow_dir=cs_train_flow_dir,
ignore_index=ignore_index
)
),
val=dict(
@@ -170,8 +147,9 @@
ann_dir='gtFine/val',
split='splits/val.txt',
pipeline=test_pipeline,
frame_offset=1,
flow_dir=cs_val_flow_dir
frame_offset=FRAME_OFFSET,
flow_dir=cs_val_flow_dir,
ignore_index=ignore_index
),
test=dict(
type='CityscapesSeqDataset',
@@ -180,7 +158,8 @@
ann_dir='gtFine/val',
split='splits/val.txt',
pipeline=test_pipeline,
frame_offset=1,
flow_dir=cs_val_flow_dir
frame_offset=FRAME_OFFSET,
flow_dir=cs_val_flow_dir,
ignore_index=ignore_index
)
)
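Note on the config above: mmcv executes config files as plain Python, so the three successive bindings of cs_train_flow_dir / cs_val_flow_dir resolve in execution order and the forward-flow paths (bound last) are what the datasets actually receive; the backward-flow block stays in place as an easy toggle. A minimal sketch of that resolution, assuming only standard mmcv Config behavior:

# Sketch: Config.fromfile executes the config module, so only the last
# binding of each top-level variable survives; the backward-flow paths
# above are inert unless the forward-flow block is commented out.
from mmcv import Config

cfg = Config.fromfile('configs/_base_/datasets/uda_synthiaSeq_CSSeq.py')
print(cfg.cs_train_flow_dir)
# -> .../cityscapes-seq_Flow/flow/forward/train (the last binding wins)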
52 changes: 40 additions & 12 deletions configs/mic/synthiaSeqHR2csHR_mic_hrda.py
@@ -65,7 +65,7 @@
target=dict(crop_pseudo_margins=[30, 240, 30, 30]),
),
# Use one separate thread/worker for data loading.
workers_per_gpu=2,
workers_per_gpu=3,
# Batch size
samples_per_gpu=2,
)
@@ -83,29 +83,57 @@
mask_lambda=1,
# Use random patch masking with a patch size of 64x64
# and a mask ratio of 0.7
l_warp_lambda=0,
l_mix_lambda=1.0,
l_warp_lambda=1.0,
l_mix_lambda=0.0,
consis_filter=False,
consis_confidence_filter=False,
consis_confidence_thresh=0,
consis_confidence_per_class_thresh=False,
consis_filter_rare_class=False,
pl_fill=False,
bottom_pl_fill=False,
source_only2=False,
oracle_mask=False,
warp_cutmix=False,
stub_training=False,
l_warp_begin=1500,
mask_generator=dict(
type='block', mask_ratio=0.7, mask_block_size=64, _delete_=True),
debug_mode=False,
class_mask_warp=None,
class_mask_cutmix=None,
exclusive_warp_cutmix=False,
modality="rgb",
modality_dropout_weights=None,
oracle_mask_add_noise=False,
oracle_mask_remove_pix=False,
oracle_mask_noise_percent=0.0,
TPS_warp_pl_confidence=False,
TPS_warp_pl_confidence_thresh=0.0,
)
# Optimizer Hyperparameters
optimizer_config = None
optimizer = dict(
lr=6e-05,
paramwise_cfg=dict(
custom_keys=dict(
head=dict(lr_mult=10.0),
pos_block=dict(decay_mult=0.0),
norm=dict(decay_mult=0.0))))
# optimizer = dict(
# lr=6e-05,
# paramwise_cfg=dict(
# custom_keys=dict(
# head=dict(lr_mult=10.0),
# pos_block=dict(decay_mult=0.0),
# norm=dict(decay_mult=0.0))))
n_gpus = None
launcher = "slurm" #"slurm"
gpu_model = 'A40'
runner = dict(type='IterBasedRunner', max_iters=40000)
# Logging Configuration
checkpoint_config = dict(by_epoch=False, interval=4000, max_keep_ckpts=8)
evaluation = dict(interval=2000, metric='mIoU', metrics=["mIoU", "pred_pred", "gt_pred", "M5", "mIoU_gt_pred"])
checkpoint_config = dict(by_epoch=False, interval=4000, max_keep_ckpts=3)
evaluation = dict(interval=4000, eval_settings={
"metrics": ["mIoU", "pred_pred", "gt_pred", "M5", "M5Fixed", "mIoU_gt_pred", "consis_confidence_filter"],
"sub_metrics": ["mask_count"],
"pixelwise accuracy": True,
"confusion matrix": True,
"return_logits": True,
"consis_confidence_thresh": 0.95
})
# Meta Information for Result Analysis
name = 'synthiaSeqHR2csHR_mic_hrda_s2_corrected'
exp = 'basic'
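The substantive change in this file is the loss re-weighting: l_warp_lambda moves from 0 to 1.0 and l_mix_lambda from 1.0 to 0.0, so training leans on the warped pseudo-label loss instead of the mix loss, with l_warp_begin=1500 delaying the warp term. A hedged sketch of how weights like these typically combine; the function and argument names are assumptions based only on the config keys, not this repo's actual dacs.py code:

# Hypothetical illustration of the weighting implied by l_warp_lambda,
# l_mix_lambda, and l_warp_begin; the real UDA training loop may differ.
def uda_total_loss(loss_src, loss_warp, loss_mix,
                   l_warp_lambda=1.0, l_mix_lambda=0.0,
                   local_iter=0, l_warp_begin=1500):
    total = loss_src
    if local_iter >= l_warp_begin:  # warp loss deferred until PLs stabilize
        total = total + l_warp_lambda * loss_warp
    return total + l_mix_lambda * loss_mix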
4 changes: 2 additions & 2 deletions configs/mic/viperHR2csHR_mic_hrda.py
@@ -129,8 +129,8 @@
gpu_model = 'A40'
runner = dict(type='IterBasedRunner', max_iters=15000)
# Logging Configuration
checkpoint_config = dict(by_epoch=False, interval=3000, max_keep_ckpts=2)
evaluation = dict(interval=3000, eval_settings={
checkpoint_config = dict(by_epoch=False, interval=4000, max_keep_ckpts=3)
evaluation = dict(interval=4000, eval_settings={
"metrics": ["mIoU", "pred_pred", "gt_pred", "M5", "M5Fixed", "mIoU_gt_pred", "consis_confidence_filter"],
"sub_metrics": ["mask_count"],
"pixelwise accuracy": True,
6 changes: 3 additions & 3 deletions mmseg/datasets/SynthiaSeq.py
@@ -16,7 +16,7 @@ class SynthiaSeqDataset(SeqUtils, SynthiaDataset):
"""Synthia Seq dataset with options for loading flow and neightboring frames.
"""

def __init__(self, split, img_suffix='.png', seg_map_suffix='_labelTrainIds_updated.png', frame_offset=1, flow_dir=None, **kwargs):
def __init__(self, split, img_suffix='.png', seg_map_suffix='_labelTrainIds_updated.png', frame_offset=1, flow_dir=None, data_type="rgb", **kwargs):
SynthiaDataset.__init__(
self, #must explicitly pass self
split=split,
@@ -28,13 +28,13 @@ def __init__(self, split, img_suffix='.png', seg_map_suffix='_labelTrainIds_upda

self.flow_dir = flow_dir
self.fut_images = self.load_annotations_seq(self.img_dir, self.img_suffix, self.ann_dir, self.seg_map_suffix, self.split, frame_offset=-1) #forward flow
# self.fut_images = self.load_annotations_seq(self.img_dir, self.img_suffix, self.ann_dir, self.seg_map_suffix, self.split, frame_offset=1) #backward flow
self.img_infos = self.load_annotations_seq(self.img_dir, self.img_suffix, self.ann_dir, self.seg_map_suffix, self.split, frame_offset=0)
self.flows = None if self.flow_dir == None else self.load_annotations_seq(self.img_dir, ".png", self.ann_dir, self.seg_map_suffix, self.split, frame_offset=0)

self.data_type = data_type


self.unpack_list = "train" in split


self.palette_to_id = [(k, i) for i, k in enumerate(self.PALETTE)]

25 changes: 25 additions & 0 deletions tools/exps/tps/synthia/tps_hrda_synthia.sh
@@ -0,0 +1,25 @@
#!/bin/bash
#SBATCH --job-name=$1
#SBATCH --output=$1.out
#SBATCH --error=$1.err
#SBATCH --gres=gpu:$2
#SBATCH --ntasks=$2
#SBATCH --ntasks-per-node=$2
#SBATCH --cpus-per-task=15
#SBATCH --constraint="a40"
#SBATCH --partition=short
#SBATCH --requeue
#SBATCH --open-mode=append
#SBATCH --exclude="ig-88,perseverance,cheetah,claptrap"

export PYTHONUNBUFFERED=TRUE
export MASTER_PORT=$P
source ~/.bashrc
conda activate openmmlab
cd /coc/scratch/vvijaykumar6/mmseg

set -x

#change begin to 1500
srun -u python -u ./tools/train.py configs/mic/synthiaSeqHR2csHR_mic_hrda.py --launcher="slurm" --l-warp-lambda=1 --l-mix-lambda=0 --l-warp-begin=0 --bottom-pl-fill True --no-masking True --TPS-warp-pl-confidence True --TPS-warp-pl-confidence-thresh 0.0 --lr 6e-5 --total-iters=40000 --seed 604 --deterministic --work-dir="./work_dirs/synthia_cs/tps_exp/$1$T" --nowandb True
# --wandbid $1$T
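One caveat worth flagging on this script: #SBATCH directives are parsed by sbatch before the shell ever runs, so positional parameters such as $1 and $2 in --job-name, --output, --gres, and the task counts are taken literally rather than expanded. Since command-line options to sbatch override in-file directives, the values would need to be supplied at submission time instead, e.g. (with a hypothetical job name) sbatch --job-name=tps_synthia --output=tps_synthia.out --error=tps_synthia.err --gres=gpu:2 --ntasks=2 --ntasks-per-node=2 tools/exps/tps/synthia/tps_hrda_synthia.sh tps_synthia 2.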