
Commit

update files
throwawaycoumbia committed Apr 13, 2020
1 parent e18a691 commit ef2d93f
Showing 14 changed files with 50 additions and 50 deletions.
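This commit replaces machine-specific absolute paths (under /local/vondrick and /proj/vondrick) with "PATH/TO/..." placeholders throughout the release. Most placeholders are argparse defaults, so a user can leave the files untouched and pass real locations at run time. Below is a minimal sketch of such an invocation, not part of this commit: OOPS_ROOT and the /data/oops layout are hypothetical, while the flags themselves (--fails_path, --kinetics_path, --border_path, --checkpoint) come from binary_classify.py in the diff that follows.

# Minimal sketch (hypothetical paths): override the "PATH/TO/..." defaults via CLI flags.
import os
import subprocess

OOPS_ROOT = os.environ.get("OOPS_ROOT", "/data/oops")  # hypothetical dataset root

subprocess.run([
    "python", "binary_classify.py",
    "--fails_path", os.path.join(OOPS_ROOT, "scenes"),
    "--kinetics_path", os.path.join(OOPS_ROOT, "kinetics", "data"),
    "--border_path", os.path.join(OOPS_ROOT, "borders.json"),
    "--checkpoint", os.path.join(OOPS_ROOT, "checkpoints", "bincls"),
], check=True)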
18 changes: 9 additions & 9 deletions binary_classify.py
@@ -22,8 +22,8 @@
from types import SimpleNamespace
from utils.kinetics_utils import *

-sys.path.append('/local/vondrick/dave/fails')
-sys.path.append('/local/vondrick/dave/fails/cnns')
+sys.path.append("PATH/TO/fails")
+sys.path.append("PATH/TO/cnns")


def make_uint8(im):
@@ -180,7 +180,7 @@ def load_checkpoint(checkpoint, filename='model_best.pth'):
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers')
parser.add_argument('-c', '--checkpoint',
-default='/proj/vondrick/dave/checkpoints/sliding_weaksup/checkpoint_bincls_newborders', type=str,
+default="PATH/TO/checkpoint_bincls_newborders", type=str,
metavar='PATH',
help='path to save checkpoint')
parser.add_argument('--resume_training', action='store_true')
@@ -214,16 +214,16 @@ def load_checkpoint(checkpoint, filename='model_best.pth'):
parser.add_argument('--clips_per_video', type=int, default=5)
# parser.add_argument('--in_vids', type=int, default=4)
# parser.add_argument('--in_step', type=int, default=1)
-parser.add_argument('--save_path', default='/proj/vondrick/dave/results/sliding_weaksup/test_results_bincls_newborders')
+parser.add_argument('--save_path', default="PATH/TO/test_results_bincls_newborders")
parser.add_argument(
-'--fails_path', default='/local/vondrick/datasets/fails/scenes')
+'--fails_path', default="PATH/TO/scenes")
parser.add_argument(
-'--kinetics_path', default='/local/vondrick/datasets/Kinetics-600/data')
+'--kinetics_path', default="PATH/TO/data")
parser.add_argument('--border_path', default="PATH/TO/borders.json")
-parser.add_argument('--pretrain_path', default='/local/vondrick/dave/fails/checkpoint_kinetics/model_best.pt.tar')
+parser.add_argument('--pretrain_path', default="PATH/TO/model_best.pt.tar")

-parser.add_argument('--dataset_path', default='/proj/vondrick/dave/datasets')
-# '--data_root', default='/proj/vondrick/datasets/fails/scenes_flow')
+parser.add_argument('--dataset_path', default="PATH/TO/datasets")
+# '--data_root', default="PATH/TO/scenes_flow")
parser.add_argument('--sample_size', type=int, default=112)
parser.add_argument('--sample_duration', type=int, default=16)
parser.add_argument('--n_kinetics_classes', type=int, default=600)
14 changes: 5 additions & 9 deletions dataloader.py
@@ -98,8 +98,8 @@ def __init__(self, fails_path, kinetics_path, frames_per_clip, step_between_clip
assert fails_path is None or fails_video_list is None
video_list = fails_video_list or glob(os.path.join(fails_path, '**', '*.mp4'), recursive=True)
if not fails_only:
-kinetics_cls = torch.load('/local/vondrick/dave/fails/kinetics_classes.pt')
-kinetics_dist = torch.load('/local/vondrick/dave/slidingwindow/fails_kinetics_features/dist.pt')
+kinetics_cls = torch.load("PATH/TO/kinetics_classes.pt")
+kinetics_dist = torch.load("PATH/TO/dist.pt")
s = len(video_list)
for i, n in kinetics_dist.items():
n *= s
@@ -213,14 +213,10 @@ def __init__(self, fails_path, kinetics_path, frames_per_clip, step_between_clip
self.video_clips.cumulative_sizes = clip_lengths.cumsum(0).tolist()
if kwargs['local_rank'] <= 0:
print(f'removed videos from {fns_removed} out of {len(self.video_clips.video_paths)} files')
-# if not fails_path.startswith('/local/vondrick/datasets/fails/scenes'):
+# if not fails_path.startswith("PATH/TO/scenes"):
for i, p in enumerate(self.video_clips.video_paths):
-if '/local/vondrick' in p:
-self.video_clips.video_paths[i] = p.replace('/local/vondrick/datasets/fails/scenes',
-os.path.dirname(fails_path))
-elif '/local3/vondrick3' in p:
-self.video_clips.video_paths[i] = p.replace('/local3/vondrick3/datasets/fails/scenes',
-os.path.dirname(fails_path))
+self.video_clips.video_paths[i] = p.replace("PATH/TO/scenes",
+os.path.dirname(fails_path))
self.debug_dataset = debug_dataset
if debug_dataset:
# self.video_clips = self.video_clips.subset([0])
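For reference, the single str.replace above rewrites clip paths that were cached under the old dataset root so they resolve beneath the directory containing the user's --fails_path. A small illustration with hypothetical local paths; the replace logic is exactly the one in the hunk above, assuming fails_path points at a split subfolder such as .../scenes/val, as the dirname() call suggests.

import os

fails_path = "/data/oops/scenes/val"               # hypothetical --fails_path value
cached = "PATH/TO/scenes/val/0042_party_fail.mp4"  # path stored in precomputed clip metadata
print(cached.replace("PATH/TO/scenes", os.path.dirname(fails_path)))
# -> /data/oops/scenes/val/0042_party_fail.mp4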
4 changes: 2 additions & 2 deletions eval_results.py
@@ -368,9 +368,9 @@ def percentile(l, p):
data = json.load(f)

# fns = glob(
-# '/local/vondrick/dave/slidingwindow/fails_kinetics_features/fails_kinetics_features_*.json')
+# "PATH/TO/fails_kinetics_features_*.json")
#
-# kin_cls = torch.load('/local/vondrick/dave/fails/kinetics_classes.pt')
+# kin_cls = torch.load("PATH/TO/kinetics_classes.pt")
#
# kindata = {}
# for k, v in kinetics.process().items():
20 changes: 10 additions & 10 deletions fails_classify.py
@@ -12,8 +12,8 @@
import torch.distributed as distrib
import torch.nn as nn
import torch.nn.functional as F
-from captum.attr import LayerAttribution
-from captum.attr._core.grad_cam import LayerGradCam
+# from captum.attr import LayerAttribution
+# from captum.attr._core.grad_cam import LayerGradCam
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip
from torchvision.models.video import r3d_18
from torchvision.utils import save_image, make_grid
@@ -28,8 +28,8 @@
# from utils.kinetics_utils import *


-# sys.path.append('/local/vondrick/dave/fails')
-# sys.path.append('/local/vondrick/dave/fails/cnns')
+# sys.path.append("PATH/TO/fails")
+# sys.path.append("PATH/TO/cnns")


def make_uint8(im):
@@ -489,17 +489,17 @@ def show(img):
# parser.add_argument('--in_step', type=int, default=1)
parser.add_argument('--save_path')
parser.add_argument(
-'--fails_path', default='/local/vondrick/datasets/fails/scenes')
+'--fails_path', default="PATH/TO/scenes")
parser.add_argument(
-'--fails_flow_path', default='/local/vondrick/datasets/fails/scenes_flow_small')
+'--fails_flow_path', default="PATH/TO/scenes_flow_small")
parser.add_argument(
-'--kinetics_path', default='/local/vondrick/datasets/Kinetics-600/data')
+'--kinetics_path', default="PATH/TO/data")
parser.add_argument('--border_path', default="PATH/TO/borders.json")
-parser.add_argument('--pretrain_path', default='/local/vondrick/dave/fails/checkpoint_kinetics/model_best.pt.tar')
+parser.add_argument('--pretrain_path', default="PATH/TO/model_best.pt.tar")

-parser.add_argument('--dataset_path', default='/proj/vondrick/dave/datasets')
+parser.add_argument('--dataset_path', default="PATH/TO/datasets")
parser.add_argument('--remove_fns')
-# '--data_root', default='/proj/vondrick/datasets/fails/scenes_flow')
+# '--data_root', default="PATH/TO/scenes_flow")
parser.add_argument('--sample_size', type=int, default=112)
parser.add_argument('--sample_duration', type=int, default=16)
parser.add_argument('--n_kinetics_classes', type=int, default=600)
6 changes: 3 additions & 3 deletions plotting.py
@@ -8,7 +8,7 @@
import plotly.graph_objects as go
import torch

-plotly.io.orca.config.executable = '/proj/vondrick/shared/envs/pytorch12/bin/orca'
+plotly.io.orca.config.executable = "PATH/TO/orca"

'''
Notes: 73% of videos pass the filter
@@ -41,7 +41,7 @@ def __exit__(self, type, value, traceback):


class FigureBuilder:
-def __init__(self, anns, confusion=None, filetype='pdf', basepath='/proj/vondrick/dave/www/figures/oops/tmp',
+def __init__(self, anns, confusion=None, filetype='pdf', basepath="PATH/TO/tmp",
ok_names=None):
self.anns = anns
self.filetype = filetype
@@ -160,7 +160,7 @@ def gen_fig(self, name):
}
}
elif name == 'vid_len':
-vid_basepath = '/proj/vondrick/datasets/fails/scenes/'
+vid_basepath = "PATH/TO/"
with open(os.path.join(vid_basepath, 'validcliplens.json'), 'r') as fff:
validcliplens = json.load(fff)

8 changes: 4 additions & 4 deletions utils/compute_kinetics_features.py
@@ -16,8 +16,8 @@
from spatial_transforms import Normalize, Compose, ToTensor, Scale, RandomScaleCrop
from torchvision.datasets.folder import default_loader
import json
-sys.path.append('/local/vondrick/dave/fails')
-sys.path.append('/local/vondrick/dave/fails/cnns')
+sys.path.append("PATH/TO/fails")
+sys.path.append("PATH/TO/cnns")


def get_mean(norm_value=255, dataset='activitynet'):
@@ -141,7 +141,7 @@ def trim_borders(img, fn):
torch.distributed.init_process_group(
backend='nccl', init_method='env://')

-basepath = '/local/vondrick/datasets/fails/scenes_small'
+basepath = "PATH/TO/scenes_small"
with open("PATH/TO/borders.json") as f:
border_file = json.load(f)
img_xform = Compose([RandomScaleCrop((1,), center=True), Resize((args.sample_size, args.sample_size)), ToTensor(
@@ -184,7 +184,7 @@ def loaderfn(fn): return (default_loader(fn), fn)
model = torch.nn.DataParallel(model)

checkpoint = torch.load(
-'/local/vondrick/dave/fails/checkpoint_kinetics/model_best.pt.tar', map_location=device)
+"PATH/TO/model_best.pt.tar", map_location=device)
model.load_state_dict(checkpoint['state_dict'])

results = []
4 changes: 2 additions & 2 deletions utils/compute_places_features.py
@@ -89,7 +89,7 @@ def trim_borders(img, fn):
torch.distributed.init_process_group(
backend='nccl', init_method='env://')

-basepath = '/local/vondrick/datasets/fails/scenes_small'
+basepath = "PATH/TO/scenes_small"
with open("PATH/TO/borders.json") as f:
border_file = json.load(f)
# img_xform = Compose([RandomScaleCrop((1,), center=True), Resize((args.sample_size, args.sample_size)), ToTensor(
@@ -167,7 +167,7 @@ def loaderfn(fn): return (default_loader(fn), fn)
model = torch.nn.DataParallel(model)

# checkpoint = torch.load(
-# '/local/vondrick/dave/fails/checkpoint_kinetics/model_best.pt.tar', map_location=device)
+# "PATH/TO/model_best.pt.tar", map_location=device)
# model.load_state_dict(checkpoint['state_dict'])

results = []
4 changes: 2 additions & 2 deletions utils/crop_videos.py
@@ -10,9 +10,9 @@
with open("PATH/TO/borders.json") as f:
fails_borders = json.load(f)

-path = '/local3/vondrick3/datasets/fails/scenes'
+path = 'PATH/TO/scenes'

-newpath = '/proj/vondrick/datasets/fails/scene_clips_split_cropped'
+newpath = "PATH/TO/scene_clips_split_cropped"

vids = glob(os.path.join(path, '*', '*', '*.mp4'))

6 changes: 3 additions & 3 deletions utils/get_pose.py
@@ -13,8 +13,8 @@
def job(fn, gpuid):
env = os.environ.copy()
env['CUDA_VISIBLE_DEVICES'] = str(gpuid)
-result = subprocess.run(['python', '/local/vondrick/tools/alphapose/video_demo.py', '--video', fn, '--outdir',
-'/local/vondrick/datasets/fails/scenes_pose', '--save_video', '--sp', '--nThreads', str(1)], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+result = subprocess.run(['python', "PATH/TO/video_demo.py", '--video', fn, '--outdir',
+"PATH/TO/scenes_pose", '--save_video', '--sp', '--nThreads', str(1)], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return result

def worker(inq, outq, lock, wid, ngpus):
@@ -26,7 +26,7 @@ def worker(inq, outq, lock, wid, ngpus):


if __name__ == "__main__":
-fns = glob('/local/vondrick/datasets/fails/scenes/*.mp4')
+fns = glob("PATH/TO/*.mp4")
inq = Queue()
outq = Queue()
lock = Lock()
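The worker body is collapsed in the hunk above; what it implements is a per-GPU consumer loop that pulls video filenames from inq and calls job() with a pinned GPU id. A hedged sketch of that pattern using the names from this file (inq, outq, lock, wid, ngpus, job); the actual body may differ in details, and lock is passed through unused here.

def worker(inq, outq, lock, wid, ngpus):
    gpuid = wid % ngpus              # pin this worker process to one GPU
    while True:
        fn = inq.get()
        if fn is None:               # sentinel: no more videos to process
            break
        outq.put(job(fn, gpuid))     # run AlphaPose on the video, collect the result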
4 changes: 2 additions & 2 deletions utils/imgs_to_flow.py
@@ -12,7 +12,7 @@
def job(path, gpuid):
env = os.environ.copy()
env['CUDA_VISIBLE_DEVICES'] = str(gpuid)
-result = subprocess.run(['python', '/local/vondrick/shared/flownet2-pytorch/main.py', '--inference', '--model', 'FlowNet2', '--save_flow', '--inference_dataset', 'ImagesFromFolder', '--inference_dataset_root', path, '--resume', '/local/vondrick/shared/flownet2-pytorch/checkpoint/FlowNet2_checkpoint.pth.tar', '--save', path, '--name', 'flow'], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+result = subprocess.run(['python', "PATH/TO/main.py", '--inference', '--model', 'FlowNet2', '--save_flow', '--inference_dataset', 'ImagesFromFolder', '--inference_dataset_root', path, '--resume', "PATH/TO/FlowNet2_checkpoint.pth.tar", '--save', path, '--name', 'flow'], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return result

def worker(inq, outq, lock, wid, ngpus):
@@ -24,7 +24,7 @@ def worker(inq, outq, lock, wid, ngpus):


if __name__ == "__main__":
-fns = sorted(glob('/proj/vondrick/datasets/fails/scenes/*/*/*'))
+fns = sorted(glob("PATH/TO/*"))
inq = Queue()
outq = Queue()
lock = Lock()
4 changes: 2 additions & 2 deletions utils/kinetics_utils.py
@@ -4,8 +4,8 @@
import collections
import sys
if True:
-sys.path.append('/local/vondrick/dave/fails')
-sys.path.append('/local/vondrick/dave/fails/cnns')
+sys.path.append("PATH/TO/fails")
+sys.path.append("PATH/TO/cnns")
# from cnns import model as model3d
from types import SimpleNamespace
import cv2
2 changes: 1 addition & 1 deletion utils/parse_kinetics_features.py
@@ -8,7 +8,7 @@

def process():
fns = glob(
-'/local/vondrick/dave/slidingwindow/fails_kinetics_features/fails_kinetics_preds_*.json')
+"PATH/TO/fails_kinetics_preds_*.json")

data = []
for fn in fns:
4 changes: 4 additions & 0 deletions utils/progress/.gitignore
@@ -0,0 +1,4 @@
+*.pyc
+*.egg-info
+build/
+dist/
2 changes: 1 addition & 1 deletion utils/vids_to_imgs.py
@@ -28,7 +28,7 @@ def worker(inq, outq, lock):
outq = Queue()
lock = Lock()
nproc = 40
#basepath = "/local/vondrick/datasets/fails/scenes"
#basepath = "PATH/TO/scenes"
basepath = "YOUR PATH HERE"
outdir = "YOUR PATH HERE"
data=glob(os.path.join(basepath, '**/*.mp4'), recursive=True)
