diff --git a/datasets/intel_lasers.npy b/datasets/intel_lasers.npy
index bca7ef5..3af808b 100644
Binary files a/datasets/intel_lasers.npy and b/datasets/intel_lasers.npy differ
diff --git a/icp.py b/icp.py
index 7b1f73d..2920e82 100644
--- a/icp.py
+++ b/icp.py
@@ -158,13 +158,6 @@ def icp(A, B, init_pose=None, max_iterations=20, tolerance=0.001):
         i: number of iterations to converge
     '''
 
-    size = np.min([A.shape[0], B.shape[0]])
-    prev_random_idx = np.sort(np.random.choice(
-        np.arange(A.shape[0]), size, replace=True))
-    cur_random_idx = np.sort(np.random.choice(
-        np.arange(B.shape[0]), size, replace=True))
-    A = A[prev_random_idx]
-    B = B[cur_random_idx]
     assert A.shape == B.shape
 
     # get number of dimensions
@@ -187,13 +180,16 @@ def icp(A, B, init_pose=None, max_iterations=20, tolerance=0.001):
         distances, indices = nearest_neighbor(src[:m,:].T, dst[:m,:].T)
 
         # Reject pairs that have 1 meter distance between them
+        indices = indices[np.linalg.norm(src[:m, :], axis=0) < 80]
+        distances = distances[np.linalg.norm(src[:m, :], axis=0) < 80]
+        filtered_src = src[:, np.linalg.norm(src[:m, :], axis=0) < 80]
         indices = indices[distances < 1.0]
 
         # compute the transformation between
         # the current source and nearest destination points
-        T,_,_ = best_fit_transform(src[:m, distances < 1.0].T, dst[:m,indices].T)
+        T,_,_ = best_fit_transform(filtered_src[:m, distances < 1.0].T, dst[:m,indices].T)
 
-        distances = distances[distances <= 1.0]
+        # distances = distances[distances <= 1.0]
 
         # update the current source
         src = np.dot(T, src)
@@ -207,11 +203,11 @@ def icp(A, B, init_pose=None, max_iterations=20, tolerance=0.001):
 
     # calculate final transformation
     T,_,_ = best_fit_transform(A, src[:m,:].T)
 
-    theta = np.arctan2(T[1,0], T[0,0])
-    t = T[0:2, 2]
-    angle_res = 1.0 # FIXME:
+    # theta = np.arctan2(T[1,0], T[0,0])
+    # t = T[0:2, 2]
+    # angle_res = 1.0
     # angles = cur_random_idx * angle_res - 90
-    # cov, _, _ = compute_covariance(src[:m, :].T, A, t, theta, np.radians(angles))
     cov = np.eye(3)
diff --git a/slam.py b/slam.py
index 82b27b1..8056631 100644
--- a/slam.py
+++ b/slam.py
@@ -33,9 +33,6 @@ def eigsorted(cov):
 
 
 parser = argparse.ArgumentParser(description='Python Graph Slam')
 
-parser.add_argument('--seed', default=None, type=int,
-                    help='Random number generator seed')
-
 parser.add_argument('--draw_last', default=float('inf'), type=int,
                     help='Number of point clouds to draw.')
 
@@ -46,16 +43,13 @@ def eigsorted(cov):
 parser.set_defaults(save_gif=False)
 
 args = parser.parse_args()
-
+
 if args.save_gif:
     import atexit
     images = []
     atexit.register(lambda: imageio.mimsave(f'./slam_{int(time.time())}.gif',
                                             images, fps=10))
 
-if args.seed is not None:
-    np.random.seed(args.seed) # For testing
-
 # Starting point
 optimizer = pose_graph.PoseGraphOptimization()
 pose = np.eye(3)
@@ -78,7 +72,7 @@ def eigsorted(cov):
         continue
 
     dx = odom - prev_odom
-    if np.linalg.norm(dx[0:2]) > 0.3 or abs(dx[2]) > 0.2:
+    if np.linalg.norm(dx[0:2]) > 0.5 or abs(dx[2]) > 0.2:
         # Scan Matching
         A = lasers[prev_idx]
         B = lasers[odom_idx]
@@ -133,8 +127,8 @@ def eigsorted(cov):
                                        max_iterations=80, tolerance=0.0001)
                 except Exception as e:
                     continue
-                information = np.linalg.inv(cov)
-                if np.mean(distances) < 0.05:
+                information = np.linalg.inv(cov)
+                if np.mean(distances) < 0.2:
                     rk = g2o.RobustKernelDCS()
                     optimizer.add_edge([vertex_idx, idx],
                                        g2o.SE2(g2o.Isometry2d(tran)),
@@ -153,7 +147,9 @@ def eigsorted(cov):
            x = optimizer.get_pose(idx)
            r = x.to_isometry().R
            t = x.to_isometry().t
-           point_cloud.append((r @ registered_lasers[idx].T + t[:, np.newaxis]).T)
+           filtered = registered_lasers[idx]
+           filtered = filtered[np.linalg.norm(filtered, axis=1) < 80]
+           point_cloud.append((r @ filtered.T + t[:, np.newaxis]).T)
            traj.append(x.to_vector()[0:2])
 
        point_cloud = np.vstack(point_cloud)
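Note (illustration only, not part of the patch): the hunks above apply two filters before and during scan matching: laser points farther than 80 m from the sensor are dropped, and correspondences whose nearest-neighbour distance exceeds 1.0 m are rejected. A minimal standalone numpy sketch of the same idea is given below; the helper names filter_scan and reject_pairs are hypothetical and do not exist in this repository.

import numpy as np

def filter_scan(points, max_range=80.0):
    # Keep only points within max_range metres of the sensor.
    # points: (N, 2) array of x/y coordinates in the sensor frame.
    return points[np.linalg.norm(points, axis=1) < max_range]

def reject_pairs(distances, indices, max_pair_dist=1.0):
    # Drop correspondences whose nearest-neighbour distance exceeds max_pair_dist.
    keep = distances < max_pair_dist
    return distances[keep], indices[keep]

Applied to each scan before the ICP loop, filter_scan mirrors the np.linalg.norm(...) < 80 masks added in icp.py and slam.py, and reject_pairs mirrors the existing distances < 1.0 gating.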