fix a rotation pred err

MaverickPeter committed Jul 27, 2021
1 parent 25c0f27 commit 7f2ab04
Showing 4 changed files with 19 additions and 17 deletions.
4 changes: 2 additions & 2 deletions config.py
@@ -2,7 +2,7 @@
EXPERIMENT_NAME = 'occ'

NUM_POINTS = 4096
-FEATURE_OUTPUT_DIM = 256
+FEATURE_OUTPUT_DIM = 1024
RESULTS_FOLDER = "results/"
OUTPUT_FILE = "results/results.txt"

@@ -17,7 +17,7 @@
TRAIN_NEGATIVES_PER_QUERY = 4
DECAY_STEP = 100000
DECAY_RATE = 0.7
-BASE_LEARNING_RATE = 0.0001
+BASE_LEARNING_RATE = 0.00001
MOMENTUM = 0.9
OPTIMIZER = 'ADAM'
MAX_EPOCH = 20
13 changes: 4 additions & 9 deletions evaluate.py
@@ -160,6 +160,7 @@ def get_latent_vectors(model, dict_to_process):
queries = load_pc_files(file_names)
queries = np.array(queries, dtype=np.float32)
heading = np.array(heading)
+heading = heading / np.pi * 180.

with torch.no_grad():
feed_tensor = torch.from_numpy(queries).float()
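
The added line converts the loaded heading from radians to degrees, presumably to stay consistent with the degree-based yaw handling elsewhere in this diff. A minimal stand-alone check of that conversion (the wrap into [0, 360) is an extra assumption, not part of the commit):

import numpy as np

heading_rad = np.array([0.0, np.pi / 2, -np.pi, 3 * np.pi / 2])
heading_deg = heading_rad / np.pi * 180.          # same expression as the added line
heading_deg_wrapped = np.mod(heading_deg, 360.)   # optional wrap into [0, 360)
print(heading_deg)                                # 0, 90, -180, 270
print(heading_deg_wrapped)                        # 0, 90, 180, 270
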
@@ -203,14 +204,7 @@ def get_latent_vectors(model, dict_to_process):
queries = np.array(queries, dtype=np.float32)
heading_edge = np.array(heading)
heading_edge = heading_edge.reshape(-1,1)

-randomYaw = (np.random.rand(heading_edge.shape[0],1) - 0.5) * 90.
-heading_edge = heading_edge / np.pi * 180.
-heading_edge += randomYaw
-
-for b in range(queries.shape[0]):
-for dims in range(queries.shape[1]):
-queries[b,dims,...] = rotation_on_SCI(queries[b,dims,...], randomYaw[b,0])

with torch.no_grad():
feed_tensor = torch.from_numpy(queries).float()
@@ -287,7 +281,8 @@ def get_recall(m, n, DATABASE_VECTORS, QUERY_VECTORS, QUERY_SETS, FFT_DATABASE_V
if indices[0][j] in true_neighbors:
if(j == 0):
count += 1
-gt_angle = GT_sc_angle_convert(-gt_yaw_query[i] + gt_yaw_database[indices[0][j]], cfg.num_sector)
+
+gt_angle = GT_sc_angle_convert(gt_yaw_database[indices[0][j]] - gt_yaw_query[i], cfg.num_sector)
angle, _ = phase_corr(database_fft[indices[0][j]], query_fft[i], device, corr2soft)
angle = angle.detach().cpu().numpy()
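
phase_corr (defined elsewhere in this repository) estimates the relative yaw between the database and query FFT descriptors. As a stand-alone sketch of the underlying idea only, classic 1-D phase correlation recovers a circular shift as below; the repo's phase_corr and corr2soft may differ in detail:

import numpy as np

def estimate_circular_shift(a, b):
    # Normalized cross-power spectrum; the peak of its inverse FFT gives the shift.
    cross_power = np.fft.fft(a) * np.conj(np.fft.fft(b))
    cross_power /= np.abs(cross_power) + 1e-8
    corr = np.real(np.fft.ifft(cross_power))
    return int(np.argmax(corr))

sig = np.random.rand(120)                       # hypothetical ring of 120 sectors
shifted = np.roll(sig, 17)
shift = estimate_circular_shift(shifted, sig)   # -> 17 sectors
yaw_deg = shift * 360.0 / len(sig)              # -> 51 degrees for a 120-sector ring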

@@ -476,7 +471,7 @@ def rotation_on_SCI(sc, rotation):
cfg.DECAY_RATE = FLAGS.decay_rate
cfg.RESULTS_FOLDER = FLAGS.results_dir

-cfg.LOG_DIR = 'log/'
+cfg.LOG_DIR = 'log/100celoss/'
cfg.OUTPUT_FILE = cfg.RESULTS_FOLDER + 'results.txt'
cfg.MODEL_FILENAME = "model.ckpt"
cfg.DATASET_FOLDER = FLAGS.dataset_folder
4 changes: 2 additions & 2 deletions generating_queries/nclt/config.py
@@ -2,7 +2,7 @@
EXPERIMENT_NAME = 'occ'

NUM_POINTS = 4096
-FEATURE_OUTPUT_DIM = 256
+FEATURE_OUTPUT_DIM = 1024
RESULTS_FOLDER = "results/"
OUTPUT_FILE = "results/results.txt"

@@ -17,7 +17,7 @@
TRAIN_NEGATIVES_PER_QUERY = 4
DECAY_STEP = 100000
DECAY_RATE = 0.7
-BASE_LEARNING_RATE = 0.0001
+BASE_LEARNING_RATE = 0.00001
MOMENTUM = 0.9
OPTIMIZER = 'ADAM'
MAX_EPOCH = 20
15 changes: 11 additions & 4 deletions train_DiSCO.py
@@ -41,8 +41,8 @@
help='Epoch to run [default: 20]')
parser.add_argument('--batch_num_queries', type=int, default=2,
help='Batch Size during training [default: 2]')
-parser.add_argument('--learning_rate', type=float, default=0.0001,
-help='Initial learning rate [default: 0.0001]')
+parser.add_argument('--learning_rate', type=float, default=0.00001,
+help='Initial learning rate [default: 0.00001]')
parser.add_argument('--momentum', type=float, default=0.9,
help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam',
@@ -57,7 +57,7 @@
help='Margin for hinge loss [default: 0.2]')
parser.add_argument('--loss_function', default='quadruplet', choices=[
'triplet', 'quadruplet'], help='triplet or quadruplet [default: quadruplet]')
-parser.add_argument('--loss_not_lazy', action='store_false',
+parser.add_argument('--loss_not_lazy', action='store_true',
help='If present, do not use lazy variant of loss')
parser.add_argument('--loss_ignore_zero_batch', action='store_true',
help='If present, mean only batches with loss > 0.0')
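
The switch from action='store_false' to action='store_true' for --loss_not_lazy makes the flag match its help text: the lazy variant is now used by default, and passing the flag disables it. A minimal illustration of the difference (not the repository's full argument parser):

import argparse

p = argparse.ArgumentParser()
p.add_argument('--loss_not_lazy', action='store_true',
               help='If present, do not use lazy variant of loss')

print(p.parse_args([]).loss_not_lazy)                   # False -> lazy loss by default
print(p.parse_args(['--loss_not_lazy']).loss_not_lazy)  # True  -> lazy variant disabled

# With action='store_false' the default would have been True, i.e. the lazy
# variant was off unless the flag was passed, contradicting the help text.
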
@@ -314,6 +314,7 @@ def train_one_epoch(model, optimizer, corr2soft, optimizer_c2s, train_writer, lo
for dim in range(queries.shape[2]):
queries[0,0,dim,...] = rotation_on_SCI(queries[0,0,dim,...], randomYaw)

+# rotate positives to the original query so that only the random yaw generates the yaw diff
for b in range(positives.shape[1]):
for dims in range(positives.shape[2]):
positives[0,b,dims,...] = rotation_on_SCI(positives[0,b,dims,...], (heading[0]-heading[1+b])/np.pi*180)
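
rotation_on_SCI(sc, rotation) (its signature appears in the evaluate.py hunk header above) rotates a scan-context image by a yaw given in degrees, as the /np.pi*180 conversion in the call suggests. A rough sketch of the idea only, assuming the rotation amounts to rolling columns along the sector axis; the actual implementation and sign convention may differ:

import numpy as np

def rotate_sc_sketch(sc, yaw_deg):
    # sc: (num_ring, num_sector) polar image; shift columns by the sector
    # offset corresponding to yaw_deg. Sign convention is an assumption here.
    num_sector = sc.shape[1]
    shift = int(round(yaw_deg / 360.0 * num_sector)) % num_sector
    return np.roll(sc, shift, axis=1)

sc = np.random.rand(40, 120)           # hypothetical 40 rings x 120 sectors
rotated = rotate_sc_sketch(sc, 30.0)   # 30 degrees -> 10 of 120 sectors
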
@@ -336,12 +337,18 @@ def train_one_epoch(model, optimizer, corr2soft, optimizer_c2s, train_writer, lo
# visualization
train_writer.add_scalar("Loss", (loss - yaw_loss).cpu().item(), TOTAL_ITERATIONS)
train_writer.add_scalar("Yaw_Loss", yaw_loss.cpu().item(), TOTAL_ITERATIONS)
train_writer.add_scalar("Yaw_Loss_L1", yaw_loss_l1.cpu().item(), TOTAL_ITERATIONS)
train_writer.add_scalar("Yaw_Loss_L1", yaw_loss_l1.item(), TOTAL_ITERATIONS)

train_writer.add_image("query", outfft[0,0,...].unsqueeze(0).detach().cpu(), TOTAL_ITERATIONS)
train_writer.add_image("positive", outfft[1,0,...].unsqueeze(0).detach().cpu(), TOTAL_ITERATIONS)
train_writer.add_image("corr", corr[0,...].unsqueeze(0).detach().cpu(), TOTAL_ITERATIONS)

+# if(yaw_loss_l1 > 10):
+# print(positives.shape)
+# train_writer.add_image("query image", torch.from_numpy(queries[0,0,...].sum(axis=0)).unsqueeze(0).float(), TOTAL_ITERATIONS)
+# train_writer.add_image("positive image1", torch.from_numpy(positives[0,0,...].sum(axis=0)).unsqueeze(0).float(), TOTAL_ITERATIONS)
+# train_writer.add_image("positive image2", torch.from_numpy(positives[0,1,...].sum(axis=0)).unsqueeze(0).float(), TOTAL_ITERATIONS)
+
TOTAL_ITERATIONS += cfg.BATCH_NUM_QUERIES

# -------------------- Evaluating ---------------------------
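
The add_image calls above pass single-channel maps, which is why each tensor gets an unsqueeze(0): SummaryWriter.add_image expects a (C, H, W) tensor by default. A minimal, self-contained version of the same logging pattern (log directory and values are placeholders):

import torch
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('log/demo')           # placeholder log directory
step = 0
corr_map = torch.rand(120, 120)              # hypothetical (H, W) correlation map

writer.add_scalar('Yaw_Loss', 0.42, step)                # scalar value + global step
writer.add_image('corr', corr_map.unsqueeze(0), step)    # (1, H, W) channel-first
writer.close()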
