From abbb5d136dd1b0aa58d18405cba3a7147e221df3 Mon Sep 17 00:00:00 2001 From: Kai-46 Date: Mon, 12 Oct 2020 11:05:53 -0400 Subject: [PATCH] clean code --- .gitignore | 5 +- README.md | 23 ++++ configs/tanks_and_temples/.DS_Store | Bin 0 -> 6148 bytes .../tat_intermediate_m60.txt | 49 ------- .../tat_intermediate_playground.txt | 49 ------- .../tat_intermediate_playground_bignet.txt | 48 ------- .../tat_intermediate_train.txt | 49 ------- .../tanks_and_temples/tat_training_truck.txt | 22 +-- .../tat_training_truck_bignet.txt | 48 ------- .../tat_training_truck_subset.txt | 47 ------- .../tat_intermediate_playground_addcarve.txt | 54 -------- .../tat_intermediate_playground_addparam.txt | 54 -------- ..._intermediate_playground_addregularize.txt | 54 -------- .../tat_training_truck_addcarve.txt | 55 -------- .../tat_training_truck_addparam.txt | 55 -------- .../tat_training_truck_addregularize.txt | 55 -------- data_loader_split.py | 14 +- ddp_model.py | 56 ++++++-- ddp_test_nerf.py | 50 +------ ddp_run_nerf.py => ddp_train_nerf.py | 130 +++++++++++------- nerf_sample_ray_split.py | 5 +- render_all.sh | 19 --- render_tat_training_truck.sh | 24 ---- sparse_playground.sh | 16 --- sparse_playground_addcarve.sh | 16 --- sparse_playground_addregularize.sh | 16 --- sparse_truck.sh | 16 --- sparse_truck_addcarve.sh | 16 --- sparse_truck_addregularize.sh | 16 --- train_lf_africa.sh | 16 --- train_lf_basket.sh | 17 --- train_lf_ship.sh | 16 --- train_lf_torch.sh | 16 --- train_tat_intermediate_m60.sh | 18 --- train_tat_intermediate_playground.sh | 18 --- train_tat_intermediate_playground_bignet.sh | 15 -- train_tat_intermediate_train.sh | 18 --- train_tat_training_truck.sh | 16 --- train_tat_training_truck_bignet.sh | 15 -- train_tat_truck_sphere_subset.sh | 19 --- 40 files changed, 181 insertions(+), 1064 deletions(-) create mode 100644 configs/tanks_and_temples/.DS_Store delete mode 100644 configs/tanks_and_temples/tat_intermediate_m60.txt delete mode 100644 configs/tanks_and_temples/tat_intermediate_playground.txt delete mode 100644 configs/tanks_and_temples/tat_intermediate_playground_bignet.txt delete mode 100644 configs/tanks_and_temples/tat_intermediate_train.txt delete mode 100644 configs/tanks_and_temples/tat_training_truck_bignet.txt delete mode 100644 configs/tanks_and_temples/tat_training_truck_subset.txt delete mode 100644 configs/tanks_and_temples_sparse/tat_intermediate_playground_addcarve.txt delete mode 100644 configs/tanks_and_temples_sparse/tat_intermediate_playground_addparam.txt delete mode 100644 configs/tanks_and_temples_sparse/tat_intermediate_playground_addregularize.txt delete mode 100644 configs/tanks_and_temples_sparse/tat_training_truck_addcarve.txt delete mode 100644 configs/tanks_and_temples_sparse/tat_training_truck_addparam.txt delete mode 100644 configs/tanks_and_temples_sparse/tat_training_truck_addregularize.txt rename ddp_run_nerf.py => ddp_train_nerf.py (91%) delete mode 100755 render_all.sh delete mode 100755 render_tat_training_truck.sh delete mode 100755 sparse_playground.sh delete mode 100755 sparse_playground_addcarve.sh delete mode 100755 sparse_playground_addregularize.sh delete mode 100755 sparse_truck.sh delete mode 100755 sparse_truck_addcarve.sh delete mode 100755 sparse_truck_addregularize.sh delete mode 100755 train_lf_africa.sh delete mode 100755 train_lf_basket.sh delete mode 100755 train_lf_ship.sh delete mode 100755 train_lf_torch.sh delete mode 100755 train_tat_intermediate_m60.sh delete mode 100755 train_tat_intermediate_playground.sh delete 
mode 100755 train_tat_intermediate_playground_bignet.sh delete mode 100755 train_tat_intermediate_train.sh delete mode 100755 train_tat_training_truck.sh delete mode 100755 train_tat_training_truck_bignet.sh delete mode 100755 train_tat_truck_sphere_subset.sh diff --git a/.gitignore b/.gitignore index 57170e6..9155c02 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,9 @@ # scripts *.sh +# mac +.DS_Store + # pycharm .idea/ @@ -141,4 +144,4 @@ dmypy.json .pytype/ # Cython debug symbols -cython_debug/ \ No newline at end of file +cython_debug/ diff --git a/README.md b/README.md index e69de29..3ffda6e 100644 --- a/README.md +++ b/README.md @@ -0,0 +1,23 @@ +# NeRF++ +Codebase for the paper: +* Works with 360° captures of large-scale unbounded scenes. +* Supports multi-GPU training and inference. + +## Data +* Download our preprocessed data from [tanks_and_temples](), [lf_data](). +* Put the data in the code directory. +* Data format: +** Each scene consists of 3 splits: train/test/validation. +** Intrinsics and poses are stored as flattened 4x4 matrices. +** The OpenCV camera coordinate system is adopted, i.e., x--->right, y--->down, z--->into the scene. +* Scene normalization: move the average camera center to the origin, and scale the scene so that all camera centers lie inside the unit sphere. + +## Training +```bash +python ddp_train_nerf.py --config configs/tanks_and_temples/tat_training_truck.txt +``` + +## Testing +```bash +python ddp_test_nerf.py --config configs/tanks_and_temples/tat_training_truck.txt --render_splits test,camera_path +``` \ No newline at end of file diff --git a/configs/tanks_and_temples/.DS_Store b/configs/tanks_and_temples/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ
Ls35+`xjp>T0 0: @@ -67,11 +74,12 @@ else: mindepth_files = [None, ] * cam_cnt - # assume all images have the same size + # assume all images have the same size as the training images train_imgfile = find_files('{}/{}/train/rgb'.format(basedir, scene), exts=['*.png', '*.jpg'])[0] train_im = imageio.imread(train_imgfile) H, W = train_im.shape[:2] + # create ray samplers ray_samplers = [] for i in range(cam_cnt): intrinsics = parse_txt(intrinsics_files[i]) diff --git a/ddp_model.py b/ddp_model.py index 1d98f05..07cd0fb 100644 --- a/ddp_model.py +++ b/ddp_model.py @@ -5,6 +5,9 @@ import torch.nn as nn from utils import TINY_NUMBER, HUGE_NUMBER from collections import OrderedDict from nerf_network import Embedder, MLPNet +import os +import logging +logger = logging.getLogger(__package__) ###################################################################################### @@ -44,14 +47,6 @@ def depth2pts_outside(ray_o, ray_d, depth): class NerfNet(nn.Module): def __init__(self, args): - ''' - :param D: network depth - :param W: network width - :param input_ch: input channels for encodings of (x, y, z) - :param input_ch_viewdirs: input channels for encodings of view directions - :param skips: skip connection in network - :param use_viewdirs: if True, will use the view directions as input - ''' super().__init__() # foreground self.fg_embedder_position = Embedder(input_dim=3, @@ -146,3 +141,48 @@ class NerfNet(nn.Module): ('bg_depth', bg_depth_map), ('bg_lambda', bg_lambda)]) return ret + + +def remap_name(name): name = 
name.replace('.', '-') # dot is not allowed by pytorch + if name[-1] == '/': + name = name[:-1] + idx = name.rfind('/') + for i in range(2): + if idx >= 0: + idx = name[:idx].rfind('/') + return name[idx + 1:] + + +class NerfNetWithAutoExpo(nn.Module): + def __init__(self, args, optim_autoexpo=False, img_names=None): + super().__init__() + self.nerf_net = NerfNet(args) + + self.optim_autoexpo = optim_autoexpo + if self.optim_autoexpo: + assert(img_names is not None) + logger.info('Optimizing autoexposure!') + + self.img_names = [remap_name(x) for x in img_names] + logger.info('\n'.join(self.img_names)) + self.autoexpo_params = nn.ParameterDict(OrderedDict([(x, nn.Parameter(torch.Tensor([0.5, 0.]))) for x in self.img_names])) + + def forward(self, ray_o, ray_d, fg_z_max, fg_z_vals, bg_z_vals, img_name=None): + ''' + :param ray_o, ray_d: [..., 3] + :param fg_z_max: [...,] + :param fg_z_vals, bg_z_vals: [..., N_samples] + :return + ''' + ret = self.nerf_net(ray_o, ray_d, fg_z_max, fg_z_vals, bg_z_vals) + + if img_name is not None: + img_name = remap_name(img_name) + if self.optim_autoexpo and (img_name in self.autoexpo_params): + autoexpo = self.autoexpo_params[img_name] + scale = torch.abs(autoexpo[0]) + 0.5 # make sure scale is always positive + shift = autoexpo[1] + ret['autoexpo'] = (scale, shift) + + return ret diff --git a/ddp_test_nerf.py b/ddp_test_nerf.py index 266d657..8e6c9b8 100644 --- a/ddp_test_nerf.py +++ b/ddp_test_nerf.py @@ -2,17 +2,17 @@ import torch # import torch.nn as nn import torch.optim import torch.distributed -from torch.nn.parallel import DistributedDataParallel as DDP +# from torch.nn.parallel import DistributedDataParallel as DDP import torch.multiprocessing import numpy as np import os -from collections import OrderedDict -from ddp_model import NerfNet +# from collections import OrderedDict +# from ddp_model import NerfNet import time from data_loader_split import load_data_split from utils import mse2psnr, colorize_np, to8b import imageio -from ddp_run_nerf import config_parser, setup_logger, setup, cleanup, render_single_image +from ddp_train_nerf import config_parser, setup_logger, setup, cleanup, render_single_image, create_nerf import logging @@ -37,46 +37,7 @@ def ddp_test_nerf(rank, args): args.chunk_size = 4096 ###### create network and wrap in ddp; each process should do this - # fix random seed just to make sure the network is initialized with same weights at different processes - torch.manual_seed(777) - # very important!!! 
otherwise it might introduce extra memory in rank=0 gpu - torch.cuda.set_device(rank) - - models = OrderedDict() - models['cascade_level'] = args.cascade_level - models['cascade_samples'] = [int(x.strip()) for x in args.cascade_samples.split(',')] - for m in range(models['cascade_level']): - net = NerfNet(args).to(rank) - net = DDP(net, device_ids=[rank], output_device=rank) - optim = torch.optim.Adam(net.parameters(), lr=args.lrate) - models['net_{}'.format(m)] = net - models['optim_{}'.format(m)] = optim - - start = -1 - - ###### load pretrained weights; each process should do this - if (args.ckpt_path is not None) and (os.path.isfile(args.ckpt_path)): - ckpts = [args.ckpt_path] - else: - ckpts = [os.path.join(args.basedir, args.expname, f) - for f in sorted(os.listdir(os.path.join(args.basedir, args.expname))) if f.endswith('.pth')] - def path2iter(path): - tmp = os.path.basename(path)[:-4] - idx = tmp.rfind('_') - return int(tmp[idx + 1:]) - ckpts = sorted(ckpts, key=path2iter) - logger.info('Found ckpts: {}'.format(ckpts)) - if len(ckpts) > 0 and not args.no_reload: - fpath = ckpts[-1] - logger.info('Reloading from: {}'.format(fpath)) - start = path2iter(fpath) - # configure map_location properly for different processes - map_location = {'cuda:%d' % 0: 'cuda:%d' % rank} - to_load = torch.load(fpath, map_location=map_location) - for m in range(models['cascade_level']): - for name in ['net_{}'.format(m), 'optim_{}'.format(m)]: - models[name].load_state_dict(to_load[name]) - models[name].load_state_dict(to_load[name]) + start, models = create_nerf(rank, args) render_splits = [x.strip() for x in args.render_splits.strip().split(',')] # start testing @@ -157,4 +118,3 @@ if __name__ == '__main__': setup_logger() test() - diff --git a/ddp_run_nerf.py b/ddp_train_nerf.py similarity index 91% rename from ddp_run_nerf.py rename to ddp_train_nerf.py index c9f14f5..a3bf88b 100644 --- a/ddp_run_nerf.py +++ b/ddp_train_nerf.py @@ -1,18 +1,20 @@ import torch -# import torch.nn as nn +import torch.nn as nn import torch.optim import torch.distributed from torch.nn.parallel import DistributedDataParallel as DDP import torch.multiprocessing import os from collections import OrderedDict -from ddp_model import NerfNet +from ddp_model import NerfNetWithAutoExpo import time from data_loader_split import load_data_split import numpy as np from tensorboardX import SummaryWriter from utils import img2mse, mse2psnr, img_HWC2CHW, colorize, TINY_NUMBER import logging +import json + logger = logging.getLogger(__package__) @@ -274,41 +276,7 @@ def cleanup(): torch.distributed.destroy_process_group() -def ddp_train_nerf(rank, args): - ###### set up multi-processing - setup(rank, args.world_size) - ###### set up logger - logger = logging.getLogger(__package__) - setup_logger() - - ###### decide chunk size according to gpu memory - logger.info('gpu_mem: {}'.format(torch.cuda.get_device_properties(rank).total_memory)) - if torch.cuda.get_device_properties(rank).total_memory / 1e9 > 14: - logger.info('setting batch size according to 24G gpu') - args.N_rand = 1024 - args.chunk_size = 8192 - else: - logger.info('setting batch size according to 12G gpu') - args.N_rand = 512 - args.chunk_size = 4096 - - ###### Create log dir and copy the config file - if rank == 0: - os.makedirs(os.path.join(args.basedir, args.expname), exist_ok=True) - f = os.path.join(args.basedir, args.expname, 'args.txt') - with open(f, 'w') as file: - for arg in sorted(vars(args)): - attr = getattr(args, arg) - file.write('{} = {}\n'.format(arg, attr)) 
- if args.config is not None: - f = os.path.join(args.basedir, args.expname, 'config.txt') - with open(f, 'w') as file: - file.write(open(args.config, 'r').read()) - torch.distributed.barrier() - - ray_samplers = load_data_split(args.datadir, args.scene, split='train', try_load_min_depth=args.load_min_depth) - val_ray_samplers = load_data_split(args.datadir, args.scene, split='validation', try_load_min_depth=args.load_min_depth) - +def create_nerf(rank, args): ###### create network and wrap in ddp; each process should do this # fix random seed just to make sure the network is initialized with same weights at different processes torch.manual_seed(777) @@ -319,8 +287,15 @@ def ddp_train_nerf(rank, args): models['cascade_level'] = args.cascade_level models['cascade_samples'] = [int(x.strip()) for x in args.cascade_samples.split(',')] for m in range(models['cascade_level']): - net = NerfNet(args).to(rank) - net = DDP(net, device_ids=[rank], output_device=rank) + img_names = None + if args.optim_autoexpo: + # load training image names for autoexposure + f = os.path.join(args.basedir, args.expname, 'train_images.json') + with open(f) as file: + img_names = json.load(file) + net = NerfNetWithAutoExpo(args, optim_autoexpo=args.optim_autoexpo, img_names=img_names).to(rank) + net = DDP(net, device_ids=[rank], output_device=rank, find_unused_parameters=True) + # net = DDP(net, device_ids=[rank], output_device=rank) optim = torch.optim.Adam(net.parameters(), lr=args.lrate) models['net_{}'.format(m)] = net models['optim_{}'.format(m)] = optim @@ -351,6 +326,56 @@ def ddp_train_nerf(rank, args): models[name].load_state_dict(to_load[name]) models[name].load_state_dict(to_load[name]) + return start, models + + +def ddp_train_nerf(rank, args): + ###### set up multi-processing + setup(rank, args.world_size) + ###### set up logger + logger = logging.getLogger(__package__) + setup_logger() + + ###### decide chunk size according to gpu memory + logger.info('gpu_mem: {}'.format(torch.cuda.get_device_properties(rank).total_memory)) + if torch.cuda.get_device_properties(rank).total_memory / 1e9 > 14: + logger.info('setting batch size according to 24G gpu') + args.N_rand = 1024 + args.chunk_size = 8192 + else: + logger.info('setting batch size according to 12G gpu') + args.N_rand = 512 + args.chunk_size = 4096 + + ###### Create log dir and copy the config file + if rank == 0: + os.makedirs(os.path.join(args.basedir, args.expname), exist_ok=True) + f = os.path.join(args.basedir, args.expname, 'args.txt') + with open(f, 'w') as file: + for arg in sorted(vars(args)): + attr = getattr(args, arg) + file.write('{} = {}\n'.format(arg, attr)) + if args.config is not None: + f = os.path.join(args.basedir, args.expname, 'config.txt') + with open(f, 'w') as file: + file.write(open(args.config, 'r').read()) + torch.distributed.barrier() + + ray_samplers = load_data_split(args.datadir, args.scene, split='train', + try_load_min_depth=args.load_min_depth) + val_ray_samplers = load_data_split(args.datadir, args.scene, split='validation', + try_load_min_depth=args.load_min_depth, skip=args.testskip) + + # write training image names for autoexposure + if args.optim_autoexpo: + f = os.path.join(args.basedir, args.expname, 'train_images.json') + with open(f, 'w') as file: + img_names = [ray_samplers[i].img_path for i in range(len(ray_samplers))] + json.dump(img_names, file, indent=2) + + ###### create network and wrap in ddp; each process should do this + start, models = create_nerf(rank, args) + ##### important!!! 
# make sure different processes sample different rays np.random.seed((rank + 1) * 777) @@ -416,13 +441,23 @@ def ddp_train_nerf(rank, args): bg_depth, _ = torch.sort(torch.cat((bg_depth, bg_depth_samples), dim=-1)) optim.zero_grad() - ret = net(ray_batch['ray_o'], ray_batch['ray_d'], fg_far_depth, fg_depth, bg_depth) + ret = net(ray_batch['ray_o'], ray_batch['ray_d'], fg_far_depth, fg_depth, bg_depth, img_name=ray_batch['img_name']) all_rets.append(ret) rgb_gt = ray_batch['rgb'].to(rank) - loss = img2mse(ret['rgb'], rgb_gt) - scalars_to_log['level_{}/loss'.format(m)] = loss.item() - scalars_to_log['level_{}/pnsr'.format(m)] = mse2psnr(loss.item()) + if 'autoexpo' in ret: + scale, shift = ret['autoexpo'] + scalars_to_log['level_{}/autoexpo_scale'.format(m)] = scale.item() + scalars_to_log['level_{}/autoexpo_shift'.format(m)] = shift.item() + # rgb_gt = scale * rgb_gt + shift + rgb_pred = (ret['rgb'] - shift) / scale + rgb_loss = img2mse(rgb_pred, rgb_gt) + loss = rgb_loss + args.lambda_autoexpo * (torch.abs(scale-1.)+torch.abs(shift)) + else: + rgb_loss = img2mse(ret['rgb'], rgb_gt) + loss = rgb_loss + scalars_to_log['level_{}/loss'.format(m)] = rgb_loss.item() + scalars_to_log['level_{}/pnsr'.format(m)] = mse2psnr(rgb_loss.item()) loss.backward() optim.step() @@ -462,7 +497,7 @@ def ddp_train_nerf(rank, args): logger.info('Logged a random training view in {} seconds'.format(dt)) log_view_to_tb(writer, global_step, log_data, gt_img=ray_samplers[idx].get_img(), mask=None, prefix='train/') - log_data = None + del log_data torch.cuda.empty_cache() if rank == 0 and (global_step % args.i_weights == 0 and global_step > 0): @@ -523,6 +558,11 @@ def config_parser(): # multiprocess learning parser.add_argument("--world_size", type=int, default='-1', help='number of processes') + # optimize autoexposure + parser.add_argument("--optim_autoexpo", action='store_true', + help='optimize autoexposure parameters') + parser.add_argument("--lambda_autoexpo", type=float, default=1., help='regularization weight for autoexposure') + # learning rate options parser.add_argument("--lrate", type=float, default=5e-4, help='learning rate') parser.add_argument("--lrate_decay_factor", type=float, default=0.1, @@ -530,8 +570,6 @@ def config_parser(): parser.add_argument("--lrate_decay_steps", type=int, default=5000, help='decay learning rate by a factor every specified number of steps') # rendering options - parser.add_argument("--inv_uniform", action='store_true', - help='if True, will uniformly sample inverse depths') parser.add_argument("--det", action='store_true', help='deterministic sampling for coarse and fine samples') parser.add_argument("--max_freq_log2", type=int, default=10, help='log2 of max freq for positional encoding (3D location)') diff --git a/nerf_sample_ray_split.py b/nerf_sample_ray_split.py index c3baac6..b7a02cb 100644 --- a/nerf_sample_ray_split.py +++ b/nerf_sample_ray_split.py @@ -172,11 +172,12 @@ class RaySamplerSingleImage(object): ('depth', depth), ('rgb', rgb), ('mask', mask), - ('min_depth', min_depth) + ('min_depth', min_depth), + ('img_name', self.img_path) ]) # return torch tensors for k in ret: - if ret[k] is not None: + if isinstance(ret[k], np.ndarray): ret[k] = torch.from_numpy(ret[k]) return ret diff --git a/render_all.sh b/render_all.sh deleted file mode 100755 index 576c904..0000000 --- a/render_all.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --gres=gpu:4 -#SBATCH -c 10 -#SBATCH -C pascal -#SBATCH --mem=40G -#SBATCH --time=24:00:00 -#SBATCH 
--output=slurm_%A.out -#SBATCH --qos=high - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -#$PYTHON -u $CODE_DIR/ddp_test_nerf.py --config $CODE_DIR/configs/lf_data/lf_africa.txt - - -$PYTHON -u $CODE_DIR/ddp_test_nerf.py --config $CODE_DIR/configs/tanks_and_temples/tat_training_truck.txt diff --git a/render_tat_training_truck.sh b/render_tat_training_truck.sh deleted file mode 100755 index 14385e6..0000000 --- a/render_tat_training_truck.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -#SBATCH -p q6 -#SBATCH --gres=gpu:3 -#SBATCH -c 8 -#SBATCH -C turing -#SBATCH --mem=16G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest -echo $CODE_DIR - -#$PYTHON -u $CODE_DIR/run_nerf.py --config $CODE_DIR/configs/tanks_and_temples/tat_training_truck_addregularize.txt -#$PYTHON -u $CODE_DIR/nerf_render_path.py --config $CODE_DIR/configs/tanks_and_temples/tat_training_truck_addregularize.txt -#$PYTHON -u $CODE_DIR/nerf_render_image.py --config $CODE_DIR/configs/tanks_and_temples/tat_training_truck_addregularize.txt - -$PYTHON -u $CODE_DIR/nerf_render_path.py --config $CODE_DIR/configs/tanks_and_temples/tat_intermediate_playground.txt -$PYTHON -u $CODE_DIR/nerf_render_image.py --config $CODE_DIR/configs/tanks_and_temples/tat_intermediate_playground.txt - -#$PYTHON -u $CODE_DIR/nerf_render_path.py --config $CODE_DIR/configs/tanks_and_temples/tat_training_truck_addregularize.txt -#$PYTHON -u $CODE_DIR/nerf_render_image.py --config $CODE_DIR/configs/tanks_and_temples/tat_training_truck_addregularize.txt diff --git a/sparse_playground.sh b/sparse_playground.sh deleted file mode 100755 index 33995fb..0000000 --- a/sparse_playground.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -#SBATCH -p q6 -#SBATCH --gres=gpu:4 -#SBATCH -c 10 -#SBATCH -C turing -#SBATCH --mem=60G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/ddp_run_nerf.py --config $CODE_DIR/configs/tanks_and_temples_sparse/tat_intermediate_playground_addparam.txt diff --git a/sparse_playground_addcarve.sh b/sparse_playground_addcarve.sh deleted file mode 100755 index bf27a25..0000000 --- a/sparse_playground_addcarve.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -#SBATCH -p q6 -#SBATCH --gres=gpu:4 -#SBATCH -c 10 -#SBATCH -C turing -#SBATCH --mem=60G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/ddp_run_nerf.py --config $CODE_DIR/configs/tanks_and_temples_sparse/tat_intermediate_playground_addcarve.txt diff --git a/sparse_playground_addregularize.sh b/sparse_playground_addregularize.sh deleted file mode 100755 index 14a92bb..0000000 --- a/sparse_playground_addregularize.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -#SBATCH -p q6 -#SBATCH --gres=gpu:4 -#SBATCH -c 10 -#SBATCH -C turing -#SBATCH --mem=60G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/ddp_run_nerf.py --config 
$CODE_DIR/configs/tanks_and_temples_sparse/tat_intermediate_playground_addregularize.txt diff --git a/sparse_truck.sh b/sparse_truck.sh deleted file mode 100755 index 542b320..0000000 --- a/sparse_truck.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --gres=gpu:4 -#SBATCH -c 10 -#SBATCH -C turing -#SBATCH --mem=60G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/ddp_run_nerf.py --config $CODE_DIR/configs/tanks_and_temples_sparse/tat_training_truck_addparam.txt diff --git a/sparse_truck_addcarve.sh b/sparse_truck_addcarve.sh deleted file mode 100755 index a071fb2..0000000 --- a/sparse_truck_addcarve.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --gres=gpu:4 -#SBATCH -c 10 -####SBATCH -C turing -#SBATCH --mem=60G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/ddp_run_nerf.py --config $CODE_DIR/configs/tanks_and_temples_sparse/tat_training_truck_addcarve.txt diff --git a/sparse_truck_addregularize.sh b/sparse_truck_addregularize.sh deleted file mode 100755 index 67977cd..0000000 --- a/sparse_truck_addregularize.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --gres=gpu:4 -#SBATCH -c 10 -#SBATCH -C turing -#SBATCH --mem=60G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/ddp_run_nerf.py --config $CODE_DIR/configs/tanks_and_temples_sparse/tat_training_truck_addregularize.txt diff --git a/train_lf_africa.sh b/train_lf_africa.sh deleted file mode 100755 index 1d84838..0000000 --- a/train_lf_africa.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --gres=gpu:8 -#SBATCH -c 10 -#SBATCH -C turing -#SBATCH --mem=80G -#SBATCH --time=24:00:00 -#SBATCH --output=slurm_%A.out -#SBATCH --qos=high - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/ddp_run_nerf.py --config $CODE_DIR/configs/lf_data/lf_africa.txt diff --git a/train_lf_basket.sh b/train_lf_basket.sh deleted file mode 100755 index b708888..0000000 --- a/train_lf_basket.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --gres=gpu:8 -#SBATCH -c 10 -#SBATCH -C turing -#SBATCH --mem=100G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - -######## #SBATCH --qos=high - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/ddp_run_nerf.py --config $CODE_DIR/configs/lf_data/lf_basket.txt diff --git a/train_lf_ship.sh b/train_lf_ship.sh deleted file mode 100755 index debdf39..0000000 --- a/train_lf_ship.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -#SBATCH -p q6 -#SBATCH --gres=gpu:4 -#SBATCH -c 10 -#SBATCH -C turing -#SBATCH --mem=80G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out -#SBATCH --qos=normal - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/ddp_run_nerf.py --config 
$CODE_DIR/configs/lf_data/lf_ship.txt diff --git a/train_lf_torch.sh b/train_lf_torch.sh deleted file mode 100755 index 4797e7a..0000000 --- a/train_lf_torch.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --gres=gpu:8 -#SBATCH -c 10 -#SBATCH -C turing -#SBATCH --mem=80G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out -#SBATCH --qos=normal - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/ddp_run_nerf.py --config $CODE_DIR/configs/lf_data/lf_torch.txt diff --git a/train_tat_intermediate_m60.sh b/train_tat_intermediate_m60.sh deleted file mode 100755 index 7e094f5..0000000 --- a/train_tat_intermediate_m60.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -#SBATCH -p q6 -#SBATCH --gres=gpu:3 -#SBATCH -c 8 -#SBATCH -C turing -#SBATCH --mem=16G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/run_nerf.py --config $CODE_DIR/configs/tanks_and_temples/tat_intermediate_m60.txt -$PYTHON -u $CODE_DIR/nerf_render_image.py --config $CODE_DIR/configs/tanks_and_temples/tat_intermediate_m60.txt - diff --git a/train_tat_intermediate_playground.sh b/train_tat_intermediate_playground.sh deleted file mode 100755 index 0566ed7..0000000 --- a/train_tat_intermediate_playground.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -#SBATCH -p q6 -#SBATCH --gres=gpu:3 -#SBATCH -c 8 -#SBATCH -C turing -#SBATCH --mem=16G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/run_nerf.py --config $CODE_DIR/configs/tanks_and_temples/tat_intermediate_playground.txt -$PYTHON -u $CODE_DIR/nerf_render_image.py --config $CODE_DIR/configs/tanks_and_temples/tat_intermediate_playground.txt - diff --git a/train_tat_intermediate_playground_bignet.sh b/train_tat_intermediate_playground_bignet.sh deleted file mode 100755 index 3b55772..0000000 --- a/train_tat_intermediate_playground_bignet.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --gres=gpu:8 -#SBATCH -c 25 -#SBATCH -C turing -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/ddp_run_nerf.py --config $CODE_DIR/configs/tanks_and_temples/tat_intermediate_playground_bignet.txt diff --git a/train_tat_intermediate_train.sh b/train_tat_intermediate_train.sh deleted file mode 100755 index bf63f23..0000000 --- a/train_tat_intermediate_train.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -#SBATCH -p q6 -#SBATCH --gres=gpu:3 -#SBATCH -c 8 -#SBATCH -C turing -#SBATCH --mem=16G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/run_nerf.py --config $CODE_DIR/configs/tanks_and_temples/tat_intermediate_train.txt -$PYTHON -u $CODE_DIR/nerf_render_image.py --config $CODE_DIR/configs/tanks_and_temples/tat_intermediate_train.txt - diff --git a/train_tat_training_truck.sh b/train_tat_training_truck.sh deleted file mode 100755 index 7b1f472..0000000 --- a/train_tat_training_truck.sh +++ /dev/null @@ -1,16 
+0,0 @@ -#!/bin/bash -#SBATCH -p q6 -#SBATCH --gres=gpu:4 -#SBATCH -c 10 -#SBATCH -C turing -#SBATCH --mem=50G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/ddp_run_nerf.py --config $CODE_DIR/configs/tanks_and_temples/tat_training_truck.txt diff --git a/train_tat_training_truck_bignet.sh b/train_tat_training_truck_bignet.sh deleted file mode 100755 index 53c69a9..0000000 --- a/train_tat_training_truck_bignet.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --gres=gpu:8 -#SBATCH -c 25 -#SBATCH -C turing -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf-ddp/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg_latest_ddp -echo $CODE_DIR - -$PYTHON -u $CODE_DIR/ddp_run_nerf.py --config $CODE_DIR/configs/tanks_and_temples/tat_training_truck_bignet.txt diff --git a/train_tat_truck_sphere_subset.sh b/train_tat_truck_sphere_subset.sh deleted file mode 100755 index d91ec4e..0000000 --- a/train_tat_truck_sphere_subset.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --gres=gpu:3 -#SBATCH -c 8 -#SBATCH -C turing -#SBATCH --mem=16G -#SBATCH --time=48:00:00 -#SBATCH --output=slurm_%A.out -#SBATCH --exclude=isl-gpu17 - - -PYTHON=/home/zhangka2/anaconda3/envs/nerf/bin/python - -CODE_DIR=/home/zhangka2/gernot_experi/nerf_bg -echo $CODE_DIR - -$PYTHON $CODE_DIR/run_nerf.py --config $CODE_DIR/configs/tanks_and_temples/tat_training_truck_subset.txt -$PYTHON $CODE_DIR/nerf_render_image.py --config $CODE_DIR/configs/tanks_and_temples/tat_training_truck_subset.txt -