diff --git a/selfdrive/locationd/kalman/helpers/chi2_lookup.py b/selfdrive/locationd/kalman/helpers/chi2_lookup.py
index f45b4577f5..e22cc97299 100644
--- a/selfdrive/locationd/kalman/helpers/chi2_lookup.py
+++ b/selfdrive/locationd/kalman/helpers/chi2_lookup.py
@@ -1,13 +1,14 @@
-import numpy as np
 import os
+import numpy as np
+

 def gen_chi2_ppf_lookup(max_dim=200):
   from scipy.stats import chi2
   table = np.zeros((max_dim, 98))
-  for dim in range(1,max_dim):
+  for dim in range(1, max_dim):
     table[dim] = chi2.ppf(np.arange(.01, .99, .01), dim)
-  #outfile = open('chi2_lookup_table', 'w')
+
   np.save('chi2_lookup_table', table)
@@ -17,5 +18,5 @@ def chi2_ppf(p, dim):
   return result


-if __name__== "__main__":
+if __name__ == "__main__":
   gen_chi2_ppf_lookup()
diff --git a/selfdrive/locationd/kalman/helpers/ekf_sym.py b/selfdrive/locationd/kalman/helpers/ekf_sym.py
index f328f0935d..98d4a87b17 100644
--- a/selfdrive/locationd/kalman/helpers/ekf_sym.py
+++ b/selfdrive/locationd/kalman/helpers/ekf_sym.py
@@ -14,16 +14,15 @@ from selfdrive.locationd.kalman.helpers.chi2_lookup import chi2_ppf
 def solve(a, b):
   if a.shape[0] == 1 and a.shape[1] == 1:
-    #assert np.allclose(b/a[0][0], np.linalg.solve(a, b))
-    return b/a[0][0]
+    return b / a[0][0]
   else:
     return np.linalg.solve(a, b)


 def null(H, eps=1e-12):
   u, s, vh = np.linalg.svd(H)
-  padding = max(0,np.shape(H)[1]-np.shape(s)[0])
-  null_mask = np.concatenate(((s <= eps), np.ones((padding,),dtype=bool)),axis=0)
+  padding = max(0, np.shape(H)[1] - np.shape(s)[0])
+  null_mask = np.concatenate(((s <= eps), np.ones((padding,), dtype=bool)), axis=0)
   null_space = np.compress(null_mask, vh, axis=0)
   return np.transpose(null_space)
@@ -41,9 +40,9 @@ def gen_code(name, f_sym, dt_sym, x_sym, obs_eqs, dim_x, dim_err, eskf_params=No
     f_err_sym = eskf_params[3]
     x_err_sym = eskf_params[4]
   else:
-    nom_x = sp.MatrixSymbol('nom_x',dim_x,1)
-    true_x = sp.MatrixSymbol('true_x',dim_x,1)
-    delta_x = sp.MatrixSymbol('delta_x',dim_x,1)
+    nom_x = sp.MatrixSymbol('nom_x', dim_x, 1)
+    true_x = sp.MatrixSymbol('true_x', dim_x, 1)
+    delta_x = sp.MatrixSymbol('delta_x', dim_x, 1)
     err_function_sym = sp.Matrix(nom_x + delta_x)
     inv_err_function_sym = sp.Matrix(true_x - nom_x)
     err_eqs = [err_function_sym, nom_x, delta_x]
@@ -63,8 +62,8 @@ def gen_code(name, f_sym, dt_sym, x_sym, obs_eqs, dim_x, dim_err, eskf_params=No
     dim_augment_err = msckf_params[3]
     N = msckf_params[4]
     feature_track_kinds = msckf_params[5]
-    assert dim_main + dim_augment*N == dim_x
-    assert dim_main_err + dim_augment_err*N == dim_err
+    assert dim_main + dim_augment * N == dim_x
+    assert dim_main_err + dim_augment_err * N == dim_err
   else:
     msckf = False
     dim_main = dim_x
@@ -123,7 +122,7 @@ def gen_code(name, f_sym, dt_sym, x_sym, obs_eqs, dim_x, dim_err, eskf_params=No
     else:
       He_str = 'NULL'
       # ea_dim = 1 # not really dim of ea but makes c function work
-    maha_thresh = chi2_ppf(0.95, int(h_sym.shape[0])) # mahalanobis distance for outlier detection
+    maha_thresh = chi2_ppf(0.95, int(h_sym.shape[0]))  # mahalanobis distance for outlier detection
     maha_test = kind in maha_test_kinds
     extra_post += """
       void update_%d(double *in_x, double *in_P, double *in_z, double *in_R, double *in_ea) {
@@ -143,16 +142,9 @@ def gen_code(name, f_sym, dt_sym, x_sym, obs_eqs, dim_x, dim_err, eskf_params=No

 class EKF_sym():
   def __init__(self, name, Q, x_initial, P_initial, dim_main, dim_main_err,
-                     N=0, dim_augment=0, dim_augment_err=0, maha_test_kinds=[]):
-    '''
-    Generates process function and all
-    observation functions for the kalman
-    filter.
-    '''
-    if N > 0:
-      self.msckf = True
-    else:
-      self.msckf = False
+               N=0, dim_augment=0, dim_augment_err=0, maha_test_kinds=[]):
+    """Generates process function and all observation functions for the kalman filter."""
+    self.msckf = N > 0
     self.N = N
     self.dim_augment = dim_augment
     self.dim_augment_err = dim_augment_err
@@ -163,8 +155,8 @@ class EKF_sym():
     x_initial = x_initial.reshape((-1, 1))
     self.dim_x = x_initial.shape[0]
     self.dim_err = P_initial.shape[0]
-    assert dim_main + dim_augment*N == self.dim_x
-    assert dim_main_err + dim_augment_err*N == self.dim_err
+    assert dim_main + dim_augment * N == self.dim_x
+    assert dim_main_err + dim_augment_err * N == self.dim_err
     assert Q.shape == P_initial.shape

     # kinds that should get mahalanobis distance
@@ -190,24 +182,29 @@ class EKF_sym():

     # wrap all the sympy functions
     def wrap_1lists(name):
-      func = eval("lib.%s" % name, {"lib":lib})
+      func = eval("lib.%s" % name, {"lib": lib})
+
       def ret(lst1, out):
         func(ffi.cast("double *", lst1.ctypes.data),
-          ffi.cast("double *", out.ctypes.data))
+             ffi.cast("double *", out.ctypes.data))
       return ret
+
     def wrap_2lists(name):
-      func = eval("lib.%s" % name, {"lib":lib})
+      func = eval("lib.%s" % name, {"lib": lib})
+
       def ret(lst1, lst2, out):
         func(ffi.cast("double *", lst1.ctypes.data),
-          ffi.cast("double *", lst2.ctypes.data),
-          ffi.cast("double *", out.ctypes.data))
+             ffi.cast("double *", lst2.ctypes.data),
+             ffi.cast("double *", out.ctypes.data))
       return ret
+
     def wrap_1list_1float(name):
-      func = eval("lib.%s" % name, {"lib":lib})
+      func = eval("lib.%s" % name, {"lib": lib})
+
       def ret(lst1, fl, out):
         func(ffi.cast("double *", lst1.ctypes.data),
-          ffi.cast("double", fl),
-          ffi.cast("double *", out.ctypes.data))
+             ffi.cast("double", fl),
+             ffi.cast("double *", out.ctypes.data))
       return ret

     self.f = wrap_1list_1float("f_fun")
@@ -221,7 +218,7 @@ class EKF_sym():
     for kind in kinds:
       self.hs[kind] = wrap_2lists("h_%d" % kind)
       self.Hs[kind] = wrap_2lists("H_%d" % kind)
-      if self.msckf and kind in self.feature_track_kinds: 
+      if self.msckf and kind in self.feature_track_kinds:
         self.Hes[kind] = wrap_2lists("He_%d" % kind)

     # wrap the C++ predict function
@@ -235,6 +232,7 @@ class EKF_sym():
     # wrap the C++ update function
     def fun_wrapper(f, kind):
       f = eval("lib.%s" % f, {"lib": lib})
+
       def _update_inner_blas(x, P, z, R, extra_args):
         f(ffi.cast("double *", x.ctypes.data),
           ffi.cast("double *", P.ctypes.data),
@@ -257,43 +255,42 @@ class EKF_sym():
     # assign the functions
     self._predict = _predict_blas
-    #self._predict = self._predict_python
+    # self._predict = self._predict_python
     self._update = _update_blas
-    #self._update = self._update_python
-
+    # self._update = self._update_python

   def init_state(self, state, covs, filter_time):
     self.x = np.array(state.reshape((-1, 1))).astype(np.float64)
     self.P = np.array(covs).astype(np.float64)
     self.filter_time = filter_time
-    self.augment_times = [0]*self.N
+    self.augment_times = [0] * self.N
     self.rewind_obscache = []
     self.rewind_t = []
     self.rewind_states = []

   def augment(self):
-    # TODO this is not a generalized way of doing
-    # this and implies that the augmented states
-    # are simply the first (dim_augment_state)
-    # elements of the main state.
+    # TODO this is not a generalized way of doing this and implies that the augmented states
+    # are simply the first (dim_augment_state) elements of the main state.
     assert self.msckf
     d1 = self.dim_main
     d2 = self.dim_main_err
     d3 = self.dim_augment
     d4 = self.dim_augment_err
+
     # push through augmented states
-    self.x[d1:-d3] = self.x[d1+d3:]
+    self.x[d1:-d3] = self.x[d1 + d3:]
     self.x[-d3:] = self.x[:d3]
     assert self.x.shape == (self.dim_x, 1)
+
     # push through augmented covs
     assert self.P.shape == (self.dim_err, self.dim_err)
     P_reduced = self.P
-    P_reduced = np.delete(P_reduced, np.s_[d2:d2+d4], axis=1)
-    P_reduced = np.delete(P_reduced, np.s_[d2:d2+d4], axis=0)
-    assert P_reduced.shape == (self.dim_err -d4, self.dim_err -d4)
+    P_reduced = np.delete(P_reduced, np.s_[d2:d2 + d4], axis=1)
+    P_reduced = np.delete(P_reduced, np.s_[d2:d2 + d4], axis=0)
+    assert P_reduced.shape == (self.dim_err - d4, self.dim_err - d4)
     to_mult = np.zeros((self.dim_err, self.dim_err - d4))
-    to_mult[:-d4,:] = np.eye(self.dim_err - d4)
-    to_mult[-d4:,:d4] = np.eye(d4)
+    to_mult[:-d4, :] = np.eye(self.dim_err - d4)
+    to_mult[-d4:, :d4] = np.eye(d4)
     self.P = to_mult.dot(P_reduced.dot(to_mult.T))
     self.augment_times = self.augment_times[1:]
     self.augment_times.append(self.filter_time)
@@ -308,13 +305,13 @@ class EKF_sym():
   def rewind(self, t):
     # find where we are rewinding to
     idx = bisect_right(self.rewind_t, t)
-    assert self.rewind_t[idx-1] <= t
+    assert self.rewind_t[idx - 1] <= t
     assert self.rewind_t[idx] > t  # must be true, or rewind wouldn't be called

     # set the state to the time right before that
-    self.filter_time = self.rewind_t[idx-1]
-    self.x[:] = self.rewind_states[idx-1][0]
-    self.P[:] = self.rewind_states[idx-1][1]
+    self.filter_time = self.rewind_t[idx - 1]
+    self.x[:] = self.rewind_states[idx - 1][0]
+    self.P[:] = self.rewind_states[idx - 1][1]

     # return the observations we rewound over for fast forwarding
     ret = self.rewind_obscache[idx:]
@@ -344,7 +341,7 @@ class EKF_sym():

     # rewind
     if self.filter_time is not None and t < self.filter_time:
-      if len(self.rewind_t) == 0 or t < self.rewind_t[0] or t < self.rewind_t[-1] -1.0:
+      if len(self.rewind_t) == 0 or t < self.rewind_t[0] or t < self.rewind_t[-1] - 1.0:
         print("observation too old at %.3f with filter at %.3f, ignoring" % (t, self.filter_time))
         return None
       rewound = self.rewind(t)
@@ -430,7 +427,7 @@ class EKF_sym():
     P[:d2, d2:] = F_curr.dot(P[:d2, d2:])
     P[d2:, :d2] = P[d2:, :d2].dot(F_curr.T)

-    P += dt*self.Q
+    P += dt * self.Q
     return x_new, P

   def _update_python(self, x, P, kind, z, R, extra_args=[]):
@@ -477,15 +474,15 @@ class EKF_sym():
         a = np.linalg.inv(H.dot(P).dot(H.T) + R)
         maha_dist = y.T.dot(a.dot(y))
         if maha_dist > chi2_ppf(0.95, y.shape[0]):
-          R = 10e16*R
+          R = 10e16 * R

     # *** same below this line ***

     # Outlier resilient weighting as described in:
     # "A Kalman Filter for Robust Outlier Detection - Jo-Anne Ting, ..."
-    weight = 1 #(1.5)/(1 + np.sum(y**2)/np.sum(R))
+    weight = 1  # (1.5)/(1 + np.sum(y**2)/np.sum(R))

-    S = dot(dot(H, P), H.T) + R/weight
+    S = dot(dot(H, P), H.T) + R / weight
     K = solve(S, dot(H, P.T)).T
     I_KH = np.eye(P.shape[0]) - dot(K, H)
@@ -550,16 +547,16 @@ class EKF_sym():
       d1 = self.dim_main
       d2 = self.dim_main_err
-      Ck = np.linalg.solve(Pk1_k[:d2,:d2], Fk_1[:d2,:d2].dot(Pk_k[:d2,:d2].T)).T
+      Ck = np.linalg.solve(Pk1_k[:d2, :d2], Fk_1[:d2, :d2].dot(Pk_k[:d2, :d2].T)).T
       xk_n = xk_k
       delta_x = np.zeros((Pk_n.shape[0], 1), dtype=np.float64)
       self.inv_err_function(xk1_k, xk1_n, delta_x)
       delta_x[:d2] = Ck.dot(delta_x[:d2])
       x_new = np.zeros((xk_n.shape[0], 1), dtype=np.float64)
       self.err_function(xk_k, delta_x, x_new)
-      xk_n[:d1] = x_new[:d1,0]
+      xk_n[:d1] = x_new[:d1, 0]
       Pk_n = Pk_k
-      Pk_n[:d2,:d2] = Pk_k[:d2,:d2] + Ck.dot(Pk1_n[:d2,:d2] - Pk1_k[:d2,:d2]).dot(Ck.T)
+      Pk_n[:d2, :d2] = Pk_k[:d2, :d2] + Ck.dot(Pk1_n[:d2, :d2] - Pk1_k[:d2, :d2]).dot(Ck.T)
       states_smoothed.append(xk_n)
       covs_smoothed.append(Pk_n)
diff --git a/selfdrive/locationd/kalman/helpers/feature_handler.py b/selfdrive/locationd/kalman/helpers/feature_handler.py
index 3596f021f1..d1fad727b5 100755
--- a/selfdrive/locationd/kalman/helpers/feature_handler.py
+++ b/selfdrive/locationd/kalman/helpers/feature_handler.py
@@ -1,26 +1,26 @@
 #!/usr/bin/env python3
 import os
-import time

 import numpy as np

 import common.transformations.orientation as orient
+from selfdrive.locationd.kalman.helpers import (TEMPLATE_DIR, load_code,
+                                                write_code)
 from selfdrive.locationd.kalman.helpers.sympy_helpers import quat_matrix_l
-from selfdrive.locationd.kalman.helpers import TEMPLATE_DIR, write_code, load_code


 def sane(track):
-  img_pos = track[1:,2:4]
-  diffs_x = abs(img_pos[1:,0] - img_pos[:-1,0])
-  diffs_y = abs(img_pos[1:,1] - img_pos[:-1,1])
+  img_pos = track[1:, 2:4]
+  diffs_x = abs(img_pos[1:, 0] - img_pos[:-1, 0])
+  diffs_y = abs(img_pos[1:, 1] - img_pos[:-1, 1])
   for i in range(1, len(diffs_x)):
-    if ((diffs_x[i] > 0.05 or diffs_x[i-1] > 0.05) and \
-       (diffs_x[i] > 2*diffs_x[i-1] or \
-        diffs_x[i] < .5*diffs_x[i-1])) or \
-       ((diffs_y[i] > 0.05 or diffs_y[i-1] > 0.05) and \
-       (diffs_y[i] > 2*diffs_y[i-1] or \
-        diffs_y[i] < .5*diffs_y[i-1])):
+    if ((diffs_x[i] > 0.05 or diffs_x[i - 1] > 0.05) and
+        (diffs_x[i] > 2 * diffs_x[i - 1] or
+         diffs_x[i] < .5 * diffs_x[i - 1])) or \
+       ((diffs_y[i] > 0.05 or diffs_y[i - 1] > 0.05) and
+        (diffs_y[i] > 2 * diffs_y[i - 1] or
+         diffs_y[i] < .5 * diffs_y[i - 1])):
       return False
   return True
@@ -45,16 +45,14 @@ class FeatureHandler():
     self.MAX_TRACKS = 6000
     self.K = K

-    #Array of tracks, each track
-    #has K 5D features preceded
-    #by 5 params that inidicate
-    #[f_idx, last_idx, updated, complete, valid]
+    # Array of tracks, each track has K 5D features preceded
+    # by 5 params that inidicate [f_idx, last_idx, updated, complete, valid]
     # f_idx: idx of current last feature in track
     # idx of of last feature in frame
     # bool for whether this track has been update
     # bool for whether this track is complete
     # bool for whether this track is valid
-    self.tracks = np.zeros((self.MAX_TRACKS, K+1, 5))
+    self.tracks = np.zeros((self.MAX_TRACKS, K + 1, 5))
     self.tracks[:] = np.nan

     name = f"{FeatureHandler.name}_{K}"
@@ -65,7 +63,7 @@ class FeatureHandler():
                          ffi.cast("double *", features.ctypes.data),
                          ffi.cast("long long *", empty_idxs.ctypes.data))

-    #self.merge_features = self.merge_features_python
+    # self.merge_features = self.merge_features_python
     self.merge_features = merge_features_c

   def reset(self):
@@ -75,7 +73,7 @@ class FeatureHandler():
     empty_idx = 0
     for f in features:
       match_idx = int(f[4])
-      if tracks[match_idx, 0, 1] == match_idx and tracks[match_idx, 0 ,2] == 0:
+      if tracks[match_idx, 0, 1] == match_idx and tracks[match_idx, 0, 2] == 0:
         tracks[match_idx, 0, 0] += 1
         tracks[match_idx, 0, 1] = f[1]
         tracks[match_idx, 0, 2] = 1
@@ -95,56 +93,57 @@ class FeatureHandler():
         empty_idx += 1

   def update_tracks(self, features):
-    t0 = time.time()
-    last_idxs = np.copy(self.tracks[:,0,1])
+    last_idxs = np.copy(self.tracks[:, 0, 1])
     real = np.isfinite(last_idxs)
     self.tracks[last_idxs[real].astype(int)] = self.tracks[real]
+
     mask = np.ones(self.MAX_TRACKS, np.bool)
     mask[last_idxs[real].astype(int)] = 0
     empty_idxs = np.arange(self.MAX_TRACKS)[mask]
+
     self.tracks[empty_idxs] = np.nan
-    self.tracks[:,0,2] = 0
+    self.tracks[:, 0, 2] = 0
     self.merge_features(self.tracks, features, empty_idxs)

   def handle_features(self, features):
     self.update_tracks(features)
-    valid_idxs = self.tracks[:,0,4] == 1
-    complete_idxs = self.tracks[:,0,3] == 1
-    stale_idxs = self.tracks[:,0,2] == 0
+    valid_idxs = self.tracks[:, 0, 4] == 1
+    complete_idxs = self.tracks[:, 0, 3] == 1
+    stale_idxs = self.tracks[:, 0, 2] == 0
     valid_tracks = self.tracks[valid_idxs]
     self.tracks[complete_idxs] = np.nan
     self.tracks[stale_idxs] = np.nan
-    return valid_tracks[:,1:,:4].reshape((len(valid_tracks), self.K*4))
-
+    return valid_tracks[:, 1:, :4].reshape((len(valid_tracks), self.K * 4))


 def generate_orient_error_jac(K):
   import sympy as sp
   from common.sympy_helpers import quat_rotate
-  x_sym = sp.MatrixSymbol('abr', 3,1)
-  dtheta = sp.MatrixSymbol('dtheta', 3,1)
+
+  x_sym = sp.MatrixSymbol('abr', 3, 1)
+  dtheta = sp.MatrixSymbol('dtheta', 3, 1)
   delta_quat = sp.Matrix(np.ones(4))
-  delta_quat[1:,:] = sp.Matrix(0.5*dtheta[0:3,:])
-  poses_sym = sp.MatrixSymbol('poses', 7*K,1)
-  img_pos_sym = sp.MatrixSymbol('img_positions', 2*K,1)
+  delta_quat[1:, :] = sp.Matrix(0.5 * dtheta[0:3, :])
+  poses_sym = sp.MatrixSymbol('poses', 7 * K, 1)
+  img_pos_sym = sp.MatrixSymbol('img_positions', 2 * K, 1)
   alpha, beta, rho = x_sym
-  to_c = sp.Matrix(orient.rot_matrix(-np.pi/2, -np.pi/2, 0))
-  pos_0 = sp.Matrix(np.array(poses_sym[K*7-7:K*7-4])[:,0])
-  q = quat_matrix_l(poses_sym[K*7-4:K*7])*delta_quat
+  to_c = sp.Matrix(orient.rot_matrix(-np.pi / 2, -np.pi / 2, 0))
+  pos_0 = sp.Matrix(np.array(poses_sym[K * 7 - 7:K * 7 - 4])[:, 0])
+  q = quat_matrix_l(poses_sym[K * 7 - 4:K * 7]) * delta_quat
   quat_rot = quat_rotate(*q)
-  rot_g_to_0 = to_c*quat_rot.T
+  rot_g_to_0 = to_c * quat_rot.T
   rows = []
   for i in range(K):
-    pos_i = sp.Matrix(np.array(poses_sym[i*7:i*7+3])[:,0])
-    q = quat_matrix_l(poses_sym[7*i+3:7*i+7])*delta_quat
+    pos_i = sp.Matrix(np.array(poses_sym[i * 7:i * 7 + 3])[:, 0])
+    q = quat_matrix_l(poses_sym[7 * i + 3:7 * i + 7]) * delta_quat
     quat_rot = quat_rotate(*q)
-    rot_g_to_i = to_c*quat_rot.T
-    rot_0_to_i = rot_g_to_i*(rot_g_to_0.T)
-    trans_0_to_i = rot_g_to_i*(pos_0 - pos_i)
-    funct_vec = rot_0_to_i*sp.Matrix([alpha, beta, 1]) + rho*trans_0_to_i
+    rot_g_to_i = to_c * quat_rot.T
+    rot_0_to_i = rot_g_to_i * (rot_g_to_0.T)
+    trans_0_to_i = rot_g_to_i * (pos_0 - pos_i)
+    funct_vec = rot_0_to_i * sp.Matrix([alpha, beta, 1]) + rho * trans_0_to_i
     h1, h2, h3 = funct_vec
-    rows.append(h1/h3 - img_pos_sym[i*2 +0])
-    rows.append(h2/h3 - img_pos_sym[i*2 + 1])
+    rows.append(h1 / h3 - img_pos_sym[i * 2 + 0])
+    rows.append(h2 / h3 - img_pos_sym[i * 2 + 1])
   img_pos_residual_sym = sp.Matrix(rows)

   # sympy into c
diff --git a/selfdrive/locationd/kalman/helpers/lst_sq_computer.py b/selfdrive/locationd/kalman/helpers/lst_sq_computer.py
index 4c7a01b710..3f46a7c07c 100755
--- a/selfdrive/locationd/kalman/helpers/lst_sq_computer.py
+++ b/selfdrive/locationd/kalman/helpers/lst_sq_computer.py
@@ -4,7 +4,6 @@
 import os
 import numpy as np
 import sympy as sp

-#import scipy.optimize as opt
 import common.transformations.orientation as orient
 from selfdrive.locationd.kalman.helpers import (TEMPLATE_DIR, load_code,
@@ -14,27 +13,28 @@ from selfdrive.locationd.kalman.helpers.sympy_helpers import (quat_rotate,

 def generate_residual(K):
-  x_sym = sp.MatrixSymbol('abr', 3,1)
-  poses_sym = sp.MatrixSymbol('poses', 7*K,1)
-  img_pos_sym = sp.MatrixSymbol('img_positions', 2*K,1)
+  x_sym = sp.MatrixSymbol('abr', 3, 1)
+  poses_sym = sp.MatrixSymbol('poses', 7 * K, 1)
+  img_pos_sym = sp.MatrixSymbol('img_positions', 2 * K, 1)
   alpha, beta, rho = x_sym
-  to_c = sp.Matrix(orient.rot_matrix(-np.pi/2, -np.pi/2, 0))
-  pos_0 = sp.Matrix(np.array(poses_sym[K*7-7:K*7-4])[:,0])
-  q = poses_sym[K*7-4:K*7]
+  to_c = sp.Matrix(orient.rot_matrix(-np.pi / 2, -np.pi / 2, 0))
+  pos_0 = sp.Matrix(np.array(poses_sym[K * 7 - 7:K * 7 - 4])[:, 0])
+  q = poses_sym[K * 7 - 4:K * 7]
   quat_rot = quat_rotate(*q)
-  rot_g_to_0 = to_c*quat_rot.T
+  rot_g_to_0 = to_c * quat_rot.T
   rows = []
+
   for i in range(K):
-    pos_i = sp.Matrix(np.array(poses_sym[i*7:i*7+3])[:,0])
-    q = poses_sym[7*i+3:7*i+7]
+    pos_i = sp.Matrix(np.array(poses_sym[i * 7:i * 7 + 3])[:, 0])
+    q = poses_sym[7 * i + 3:7 * i + 7]
     quat_rot = quat_rotate(*q)
-    rot_g_to_i = to_c*quat_rot.T
-    rot_0_to_i = rot_g_to_i*(rot_g_to_0.T)
-    trans_0_to_i = rot_g_to_i*(pos_0 - pos_i)
-    funct_vec = rot_0_to_i*sp.Matrix([alpha, beta, 1]) + rho*trans_0_to_i
+    rot_g_to_i = to_c * quat_rot.T
+    rot_0_to_i = rot_g_to_i * rot_g_to_0.T
+    trans_0_to_i = rot_g_to_i * (pos_0 - pos_i)
+    funct_vec = rot_0_to_i * sp.Matrix([alpha, beta, 1]) + rho * trans_0_to_i
     h1, h2, h3 = funct_vec
-    rows.append(h1/h3 - img_pos_sym[i*2 +0])
-    rows.append(h2/h3 - img_pos_sym[i*2 + 1])
+    rows.append(h1 / h3 - img_pos_sym[i * 2 + 0])
+    rows.append(h2 / h3 - img_pos_sym[i * 2 + 1])
   img_pos_residual_sym = sp.Matrix(rows)

   # sympy into c
@@ -64,7 +64,7 @@ class LstSqComputer():
     write_code(filename, code, header)

   def __init__(self, K=4, MIN_DEPTH=2, MAX_DEPTH=500):
-    self.to_c = orient.rot_matrix(-np.pi/2, -np.pi/2, 0)
+    self.to_c = orient.rot_matrix(-np.pi / 2, -np.pi / 2, 0)
     self.MAX_DEPTH = MAX_DEPTH
     self.MIN_DEPTH = MIN_DEPTH
@@ -73,20 +73,20 @@ class LstSqComputer():
     # wrap c functions
     def residual_jac(x, poses, img_positions):
-      out = np.zeros(((K*2, 3)), dtype=np.float64)
+      out = np.zeros(((K * 2, 3)), dtype=np.float64)
       lib.jac_fun(ffi.cast("double *", x.ctypes.data),
-        ffi.cast("double *", poses.ctypes.data),
-        ffi.cast("double *", img_positions.ctypes.data),
-        ffi.cast("double *", out.ctypes.data))
+                  ffi.cast("double *", poses.ctypes.data),
+                  ffi.cast("double *", img_positions.ctypes.data),
+                  ffi.cast("double *", out.ctypes.data))
       return out
     self.residual_jac = residual_jac

     def residual(x, poses, img_positions):
-      out = np.zeros((K*2), dtype=np.float64)
+      out = np.zeros((K * 2), dtype=np.float64)
       lib.res_fun(ffi.cast("double *", x.ctypes.data),
-        ffi.cast("double *", poses.ctypes.data),
-        ffi.cast("double *", img_positions.ctypes.data),
-        ffi.cast("double *", out.ctypes.data))
+                  ffi.cast("double *", poses.ctypes.data),
+                  ffi.cast("double *", img_positions.ctypes.data),
+                  ffi.cast("double *", out.ctypes.data))
       return out
     self.residual = residual
@@ -96,24 +96,26 @@ class LstSqComputer():
       # Can't be a view for the ctype
      img_positions = np.copy(img_positions)
      lib.compute_pos(ffi.cast("double *", self.to_c.ctypes.data),
-       ffi.cast("double *", poses.ctypes.data),
-       ffi.cast("double *", img_positions.ctypes.data),
-       ffi.cast("double *", param.ctypes.data),
-       ffi.cast("double *", pos.ctypes.data))
+                      ffi.cast("double *", poses.ctypes.data),
+                      ffi.cast("double *", img_positions.ctypes.data),
+                      ffi.cast("double *", param.ctypes.data),
+                      ffi.cast("double *", pos.ctypes.data))
       return pos, param
     self.compute_pos_c = compute_pos_c

   def compute_pos(self, poses, img_positions, debug=False):
     pos, param = self.compute_pos_c(poses, img_positions)
-    #pos, param = self.compute_pos_python(poses, img_positions)
-    depth = 1/param[2]
+    # pos, param = self.compute_pos_python(poses, img_positions)
+
+    depth = 1 / param[2]
     if debug:
       if not self.debug:
         raise NotImplementedError("This is not a debug computer")
-      #orient_err_jac = self.orient_error_jac(param, poses, img_positions, np.zeros(3)).reshape((-1,2,3))
-      jac = self.residual_jac(param, poses, img_positions).reshape((-1,2,3))
-      res = self.residual(param, poses, img_positions).reshape((-1,2))
-      return pos, param, res, jac #, orient_err_jac
+
+      # orient_err_jac = self.orient_error_jac(param, poses, img_positions, np.zeros(3)).reshape((-1,2,3))
+      jac = self.residual_jac(param, poses, img_positions).reshape((-1, 2, 3))
+      res = self.residual(param, poses, img_positions).reshape((-1, 2))
+      return pos, param, res, jac  # , orient_err_jac
     elif (self.MIN_DEPTH < depth < self.MAX_DEPTH):
       return pos
     else:
@@ -130,45 +132,44 @@ class LstSqComputer():
       return [x]

   def compute_pos_python(self, poses, img_positions, check_quality=False):
+    import scipy.optimize as opt
+
     # This procedure is also described
     # in the MSCKF paper (Mourikis et al. 2007)
     x = np.array([img_positions[-1][0], img_positions[-1][1], 0.1])
-    res = opt.leastsq(self.residual, x, Dfun=self.residual_jac, args=(poses, img_positions)) # scipy opt
-    #res = self.gauss_newton(self.residual, self.residual_jac, x, (poses, img_positions)) # diy gauss_newton
+    res = opt.leastsq(self.residual, x, Dfun=self.residual_jac, args=(poses, img_positions))  # scipy opt
+    # res = self.gauss_newton(self.residual, self.residual_jac, x, (poses, img_positions))  # diy gauss_newton

     alpha, beta, rho = res[0]
-    rot_0_to_g = (orient.rotations_from_quats(poses[-1,3:])).dot(self.to_c.T)
-    return (rot_0_to_g.dot(np.array([alpha, beta, 1])))/rho + poses[-1,:3]
+    rot_0_to_g = (orient.rotations_from_quats(poses[-1, 3:])).dot(self.to_c.T)
+    return (rot_0_to_g.dot(np.array([alpha, beta, 1]))) / rho + poses[-1, :3]

-
-
-'''
-EXPERIMENTAL CODE
-'''
+# EXPERIMENTAL CODE
 def unroll_shutter(img_positions, poses, v, rot_rates, ecef_pos):
   # only speed correction for now
-  t_roll = 0.016 # 16ms rolling shutter?
+  t_roll = 0.016  # 16ms rolling shutter?
   vroll, vpitch, vyaw = rot_rates
-  A = 0.5*np.array([[-1, -vroll, -vpitch, -vyaw],
-                 [vroll, 0, vyaw, -vpitch],
-                 [vpitch, -vyaw, 0, vroll],
-                 [vyaw, vpitch, -vroll, 0]])
+  A = 0.5 * np.array([[-1, -vroll, -vpitch, -vyaw],
+                      [vroll, 0, vyaw, -vpitch],
+                      [vpitch, -vyaw, 0, vroll],
+                      [vyaw, vpitch, -vroll, 0]])
   q_dot = A.dot(poses[-1][3:7])
   v = np.append(v, q_dot)
-  v = np.array([v[0], v[1], v[2],0,0,0,0])
-  current_pose = poses[-1] + v*0.05
+  v = np.array([v[0], v[1], v[2], 0, 0, 0, 0])
+  current_pose = poses[-1] + v * 0.05
   poses = np.vstack((current_pose, poses))
-  dt = -img_positions[:,1]*t_roll/0.48
+  dt = -img_positions[:, 1] * t_roll / 0.48
   errs = project(poses, ecef_pos) - project(poses + np.atleast_2d(dt).T.dot(np.atleast_2d(v)), ecef_pos)
   return img_positions - errs

+
 def project(poses, ecef_pos):
   img_positions = np.zeros((len(poses), 2))
   for i, p in enumerate(poses):
     cam_frame = orient.rotations_from_quats(p[3:]).T.dot(ecef_pos - p[:3])
-    img_positions[i] = np.array([cam_frame[1]/cam_frame[0], cam_frame[2]/cam_frame[0]])
+    img_positions[i] = np.array([cam_frame[1] / cam_frame[0], cam_frame[2] / cam_frame[0]])
   return img_positions
diff --git a/selfdrive/locationd/kalman/helpers/sympy_helpers.py b/selfdrive/locationd/kalman/helpers/sympy_helpers.py
index c5bfc8a5aa..f8c4f1c5f1 100644
--- a/selfdrive/locationd/kalman/helpers/sympy_helpers.py
+++ b/selfdrive/locationd/kalman/helpers/sympy_helpers.py
@@ -2,31 +2,35 @@ import sympy as sp
 import numpy as np

+
 def cross(x):
-  ret = sp.Matrix(np.zeros((3,3)))
-  ret[0,1], ret[0,2] = -x[2], x[1]
-  ret[1,0], ret[1,2] = x[2], -x[0]
-  ret[2,0], ret[2,1] = -x[1], x[0]
+  ret = sp.Matrix(np.zeros((3, 3)))
+  ret[0, 1], ret[0, 2] = -x[2], x[1]
+  ret[1, 0], ret[1, 2] = x[2], -x[0]
+  ret[2, 0], ret[2, 1] = -x[1], x[0]
   return ret

+
 def euler_rotate(roll, pitch, yaw):
   # make symbolic rotation matrix from eulers
-  matrix_roll = sp.Matrix([[1, 0, 0],
-                  [0, sp.cos(roll), -sp.sin(roll)],
-                  [0, sp.sin(roll), sp.cos(roll)]])
-  matrix_pitch = sp.Matrix([[sp.cos(pitch), 0, sp.sin(pitch)],
-                  [0, 1, 0],
-                  [-sp.sin(pitch), 0, sp.cos(pitch)]])
-  matrix_yaw = sp.Matrix([[sp.cos(yaw), -sp.sin(yaw), 0],
-                  [sp.sin(yaw), sp.cos(yaw), 0],
-                  [0, 0, 1]])
-  return matrix_yaw*matrix_pitch*matrix_roll
+  matrix_roll = sp.Matrix([[1, 0, 0],
+                           [0, sp.cos(roll), -sp.sin(roll)],
+                           [0, sp.sin(roll), sp.cos(roll)]])
+  matrix_pitch = sp.Matrix([[sp.cos(pitch), 0, sp.sin(pitch)],
+                            [0, 1, 0],
+                            [-sp.sin(pitch), 0, sp.cos(pitch)]])
+  matrix_yaw = sp.Matrix([[sp.cos(yaw), -sp.sin(yaw), 0],
+                          [sp.sin(yaw), sp.cos(yaw), 0],
+                          [0, 0, 1]])
+  return matrix_yaw * matrix_pitch * matrix_roll
+

 def quat_rotate(q0, q1, q2, q3):
   # make symbolic rotation matrix from quat
-  return sp.Matrix([[q0**2 + q1**2 - q2**2 - q3**2, 2*(q1*q2 + q0*q3), 2*(q1*q3 - q0*q2)],
-                    [2*(q1*q2 - q0*q3), q0**2 - q1**2 + q2**2 - q3**2, 2*(q2*q3 + q0*q1)],
-                    [2*(q1*q3 + q0*q2), 2*(q2*q3 - q0*q1), q0**2 - q1**2 - q2**2 + q3**2]]).T
+  return sp.Matrix([[q0**2 + q1**2 - q2**2 - q3**2, 2 * (q1 * q2 + q0 * q3), 2 * (q1 * q3 - q0 * q2)],
+                    [2 * (q1 * q2 - q0 * q3), q0**2 - q1**2 + q2**2 - q3**2, 2 * (q2 * q3 + q0 * q1)],
+                    [2 * (q1 * q3 + q0 * q2), 2 * (q2 * q3 - q0 * q1), q0**2 - q1**2 - q2**2 + q3**2]]).T
+

 def quat_matrix_l(p):
   return sp.Matrix([[p[0], -p[1], -p[2], -p[3]],
@@ -34,6 +38,7 @@ def quat_matrix_l(p):
                     [p[2], p[3], p[0], -p[1]],
                     [p[3], -p[2], p[1], p[0]]])

+
 def quat_matrix_r(p):
   return sp.Matrix([[p[0], -p[1], -p[2], -p[3]],
                     [p[1], p[0], p[3], -p[2]],
@@ -49,10 +54,11 @@ def sympy_into_c(sympy_functions):
     # argument ordering input to sympy is broken with function with output arguments
     nargs = []
+
     # reorder the input arguments
     for aa in args:
       if aa is None:
-        nargs.append(codegen.InputArgument(sp.Symbol('unused'), dimensions=[1,1]))
+        nargs.append(codegen.InputArgument(sp.Symbol('unused'), dimensions=[1, 1]))
         continue
       found = False
       for a in r.arguments:
@@ -62,22 +68,23 @@ def sympy_into_c(sympy_functions):
           break
       if not found:
         # [1,1] is a hack for Matrices
-        nargs.append(codegen.InputArgument(aa, dimensions=[1,1]))
+        nargs.append(codegen.InputArgument(aa, dimensions=[1, 1]))
+
     # add the output arguments
     for a in r.arguments:
       if type(a) == codegen.OutputArgument:
        nargs.append(a)
-    #assert len(r.arguments) == len(args)+1
+    # assert len(r.arguments) == len(args)+1
     r.arguments = nargs

     # add routine to list
     routines.append(r)

   [(c_name, c_code), (h_name, c_header)] = codegen.get_code_generator('C', 'ekf', 'C99').write(routines, "ekf")
-  c_code = '\n'.join(x for x in c_code.split("\n") if len(x) > 0 and x[0] != '#')
-  c_header = '\n'.join(x for x in c_header.split("\n") if len(x) > 0 and x[0] != '#')
+  c_header = '\n'.join(x for x in c_header.split("\n") if len(x) > 0 and x[0] != '#')
+  c_code = '\n'.join(x for x in c_code.split("\n") if len(x) > 0 and x[0] != '#')
   c_code = 'extern "C" {\n#include \n' + c_code + "\n}\n"
   return c_header, c_code
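For reference, a minimal standalone sketch of the Mahalanobis outlier gate that ekf_sym.py builds around chi2_lookup.py: the squared Mahalanobis distance of the innovation is compared against the chi-squared 95th-percentile for the measurement dimension, and the measurement noise R is inflated to reject the outlier. This sketch is not part of the patch above; the helper name maha_gate is illustrative only, and it calls scipy.stats.chi2.ppf directly instead of the precomputed table used by chi2_ppf.

import numpy as np
from scipy.stats import chi2


def maha_gate(H, P, R, y, p=0.95):
  # Innovation covariance S = H P H^T + R; squared Mahalanobis distance y^T S^-1 y.
  a = np.linalg.inv(H.dot(P).dot(H.T) + R)
  maha_dist = y.T.dot(a.dot(y)).item()
  if maha_dist > chi2.ppf(p, y.shape[0]):
    R = 10e16 * R  # inflate R so the update effectively ignores this measurement
  return R


# Example: a 2D measurement whose innovation falls far outside the 95% gate.
H = np.eye(2)
P = 0.1 * np.eye(2)
R = 0.01 * np.eye(2)
y = np.array([[5.0], [5.0]])
print(maha_gate(H, P, R, y))  # R comes back inflated, i.e. the measurement is rejected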