parent
86fb001d62
commit
e47a2e6e30
106 changed files with 3753 additions and 1926 deletions
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1c82f4ea28008aad9a25cd434e5f817c84d621423739e7bca814f4dcff2b9714
-size 18376958
+oid sha256:6976978d140b928a499f26a6e8d879a752034a762154dde2d9bde55ef59a9c29
+size 18310604
@@ -0,0 +1,16 @@
from posix.time cimport clock_gettime, timespec, CLOCK_BOOTTIME, CLOCK_MONOTONIC_RAW

cdef double readclock(int clock_id):
  cdef timespec ts
  cdef double current

  clock_gettime(clock_id, &ts)
  current = ts.tv_sec + (ts.tv_nsec / 1000000000.)
  return current


def monotonic_time():
  return readclock(CLOCK_MONOTONIC_RAW)

def sec_since_boot():
  return readclock(CLOCK_BOOTTIME)
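
A minimal usage sketch of these two clocks (hypothetical; do_work is a placeholder, and the module is assumed importable as common.realtime, which is how the VIN query later in this diff imports it):

from common.realtime import monotonic_time, sec_since_boot

def do_work():  # placeholder for the code being timed
  pass

t_start = monotonic_time()     # CLOCK_MONOTONIC_RAW: not slewed by NTP
boot_start = sec_since_boot()  # CLOCK_BOOTTIME: keeps counting across suspend

do_work()

print "took %.3f s (%.3f s including suspend)" % (
  monotonic_time() - t_start, sec_since_boot() - boot_start)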
@@ -0,0 +1,103 @@
import os
import shutil
import tempfile
from atomicwrites import AtomicWriter

def mkdirs_exists_ok(path):
  try:
    os.makedirs(path)
  except OSError:
    if not os.path.isdir(path):
      raise

def rm_not_exists_ok(path):
  try:
    os.remove(path)
  except OSError:
    if os.path.exists(path):
      raise

def rm_tree_or_link(path):
  if os.path.islink(path):
    os.unlink(path)
  elif os.path.isdir(path):
    shutil.rmtree(path)

def get_tmpdir_on_same_filesystem(path):
  # TODO(mgraczyk): HACK, we should actually check for which filesystem.
  normpath = os.path.normpath(path)
  parts = normpath.split("/")
  if len(parts) > 1:
    if parts[1].startswith("raid"):
      if len(parts) > 2 and parts[2] == "runner":
        return "/{}/runner/tmp".format(parts[1])
      elif len(parts) > 2 and parts[2] == "aws":
        return "/{}/aws/tmp".format(parts[1])
      else:
        return "/{}/tmp".format(parts[1])
    elif parts[1] == "aws":
      return "/aws/tmp"
    elif parts[1] == "scratch":
      return "/scratch/tmp"
  return "/tmp"

class AutoMoveTempdir(object):
  def __init__(self, target_path, temp_dir=None):
    self._target_path = target_path
    self._path = tempfile.mkdtemp(dir=temp_dir)

  @property
  def name(self):
    return self._path

  def close(self):
    os.rename(self._path, self._target_path)

  def __enter__(self): return self

  def __exit__(self, type, value, traceback):
    if type is None:
      self.close()
    else:
      shutil.rmtree(self._path)

class NamedTemporaryDir(object):
  def __init__(self, temp_dir=None):
    self._path = tempfile.mkdtemp(dir=temp_dir)

  @property
  def name(self):
    return self._path

  def close(self):
    shutil.rmtree(self._path)

  def __enter__(self): return self

  def __exit__(self, type, value, traceback):
    self.close()

def _get_fileobject_func(writer, temp_dir):
  def _get_fileobject():
    file_obj = writer.get_fileobject(dir=temp_dir)
    os.chmod(file_obj.name, 0o644)
    return file_obj
  return _get_fileobject

def atomic_write_on_fs_tmp(path, **kwargs):
  """Creates an atomic writer using a temporary file in a temporary directory
     on the same filesystem as path.
  """
  # TODO(mgraczyk): This use of AtomicWriter relies on implementation details to set the temp
  # directory.
  writer = AtomicWriter(path, **kwargs)
  return writer._open(_get_fileobject_func(writer, get_tmpdir_on_same_filesystem(path)))


def atomic_write_in_dir(path, **kwargs):
  """Creates an atomic writer using a temporary file in the same directory
     as the destination file.
  """
  writer = AtomicWriter(path, **kwargs)
  return writer._open(_get_fileobject_func(writer, os.path.dirname(path)))
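A short, hedged sketch of how these helpers compose (paths are illustrative):

import os
from common.file_helpers import mkdirs_exists_ok, atomic_write_in_dir, AutoMoveTempdir

mkdirs_exists_ok("/tmp/demo")  # no error if the directory already exists

# Readers never observe a half-written file: the writer opens a temp file in
# the same directory and renames it over the target on successful close.
with atomic_write_in_dir("/tmp/demo/state.json") as f:
  f.write('{"ok": true}')

# Same idea for directories: build under a temp name, rename into place on
# success, remove on exception.
with AutoMoveTempdir("/tmp/demo/dataset") as d:
  open(os.path.join(d.name, "part0"), "w").close()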
@@ -0,0 +1,95 @@
#!/usr/bin/env python
import time
from common.realtime import sec_since_boot
import selfdrive.messaging as messaging
from selfdrive.boardd.boardd import can_list_to_can_capnp

def get_vin(logcan, sendcan):

  # works on standard 11-bit addresses for diagnostic. Tested on Toyota and Subaru;
  # Honda uses the extended 29-bit addresses, and unfortunately only works from OBDII
  query_msg = [[0x7df, 0, '\x02\x09\x02'.ljust(8, "\x00"), 0],
               [0x7e0, 0, '\x30'.ljust(8, "\x00"), 0]]

  cnts = [1, 2]  # Number of messages to wait for at each iteration
  vin_valid = True

  dat = []
  for i in range(len(query_msg)):
    cnt = 0
    sendcan.send(can_list_to_can_capnp([query_msg[i]], msgtype='sendcan'))
    got_response = False
    t_start = sec_since_boot()
    while sec_since_boot() - t_start < 0.05 and not got_response:
      for a in messaging.drain_sock(logcan):
        for can in a.can:
          if can.src == 0 and can.address == 0x7e8:
            vin_valid = vin_valid and is_vin_response_valid(can.dat, i, cnt)
            dat += can.dat[2:] if i == 0 else can.dat[1:]
            cnt += 1
            if cnt == cnts[i]:
              got_response = True
      time.sleep(0.01)

  return "".join(dat[3:]) if vin_valid else ""


"""
if 'vin' not in gctx:
  print "getting vin"
  gctx['vin'] = query_vin()[3:]
  print "got VIN %s" % (gctx['vin'],)
  cloudlog.info("got VIN %s" % (gctx['vin'],))

  # *** determine platform based on VIN ****
  if vin.startswith("19UDE2F36G"):
    print "ACURA ILX 2016"
    self.civic = False
  else:
    # TODO: add Honda check explicitly
    print "HONDA CIVIC 2016"
    self.civic = True

  # *** special case VIN of Acura test platform
  if vin == "19UDE2F36GA001322":
    print "comma.ai test platform detected"
    # it has a gas interceptor and a torque mod
    self.torque_mod = True
"""


# sanity checks on response messages from vin query
def is_vin_response_valid(can_dat, step, cnt):

  can_dat = [ord(i) for i in can_dat]

  if len(can_dat) != 8:
    # ISO-TP messages are all 8 bytes
    return False

  if step == 0:
    # VIN does not fit in a single message and it's 20 bytes of data
    if can_dat[0] != 0x10 or can_dat[1] != 0x14:
      return False

  if step == 1 and cnt == 0:
    # first response after a CONTINUE query is sent
    if can_dat[0] != 0x21:
      return False

  if step == 1 and cnt == 1:
    # second response after a CONTINUE query is sent
    if can_dat[0] != 0x22:
      return False

  return True


if __name__ == "__main__":
  import zmq
  from selfdrive.services import service_list
  context = zmq.Context()
  logcan = messaging.sub_sock(context, service_list['can'].port)
  sendcan = messaging.pub_sock(context, service_list['sendcan'].port)
  time.sleep(1.)  # give time to sendcan socket to start

  print get_vin(logcan, sendcan)
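
For intuition, a hedged sketch of the ISO-TP framing that is_vin_response_valid checks. The VIN characters here are invented, but the PCI bytes (0x10 0x14 for the first frame, 0x21/0x22 for the consecutive frames) match the checks above:

# First frame: PCI 0x10, length 0x14 = 20 bytes
# (0x49 0x02 0x01 response echo + 17-byte VIN).
first = "\x10\x14\x49\x02\x01" + "1FA"     # step 0
# Consecutive frames arrive after the 0x30 CONTINUE query.
cons1 = "\x21" + "HP8AM12"                 # step 1, first  (cnt == 0)
cons2 = "\x22" + "3456789"                 # step 1, second (cnt == 1)

assert is_vin_response_valid(first, 0, 0)
assert is_vin_response_valid(cons1, 1, 0)
assert is_vin_response_valid(cons2, 1, 1)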
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4e8c704ed800b27d8ac0dda64aafcbc93239012ab65f3ac7540db5965844b2f0
+oid sha256:206205b51ce5bfd3c387cb961302879b72d1e1ea3e81bb8b59dbbb7109197142
 size 2501608
@@ -0,0 +1,15 @@
// the c version of selfdrive/messaging.py

#include <zmq.h>

// TODO: refactor to take in service instead of endpoint?
void *sub_sock(void *ctx, const char *endpoint) {
  void* sock = zmq_socket(ctx, ZMQ_SUB);
  assert(sock);
  zmq_setsockopt(sock, ZMQ_SUBSCRIBE, "", 0);
  int reconnect_ivl = 500;
  zmq_setsockopt(sock, ZMQ_RECONNECT_IVL_MAX, &reconnect_ivl, sizeof(reconnect_ivl));
  zmq_connect(sock, endpoint);
  return sock;
}
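Two hedged notes: as committed, the snippet calls assert() without including <assert.h>, so the include presumably comes from whichever file includes this one. And since the comment calls this "the c version of selfdrive/messaging.py", here is a sketch of the equivalent subscriber setup with pyzmq (the endpoint is illustrative, and this is not the actual messaging.py):

import zmq

def sub_sock(ctx, endpoint):
  # Mirrors the C helper: SUB socket, subscribe to everything,
  # cap the reconnect back-off at 500 ms, then connect.
  sock = ctx.socket(zmq.SUB)
  sock.setsockopt(zmq.SUBSCRIBE, b"")
  sock.setsockopt(zmq.RECONNECT_IVL_MAX, 500)
  sock.connect(endpoint)
  return sock

sock = sub_sock(zmq.Context(), "tcp://127.0.0.1:8006")  # illustrative endpoint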
@@ -1 +1 @@
-#define COMMA_VERSION "0.5.12-release"
+#define COMMA_VERSION "0.5.13-release"
@@ -0,0 +1,28 @@
CC = clang
CXX = clang++

CPPFLAGS = -Wall -g -fPIC -std=c++11 -O2

OBJS = fastcluster.o test.o
DEPS := $(OBJS:.o=.d)

all: libfastcluster.so

test: libfastcluster.so test.o
	$(CXX) -g -L. -lfastcluster -o $@ $+

valgrind: test
	valgrind --leak-check=full ./test


libfastcluster.so: fastcluster.o
	$(CXX) -g -shared -o $@ $+

%.o: %.cpp
	$(CXX) $(CPPFLAGS) -MMD -c $*.cpp

clean:
	rm -f $(OBJS) $(DEPS) libfastcluster.so test


-include $(DEPS)
@@ -0,0 +1,79 @@
C++ interface to fast hierarchical clustering algorithms
========================================================

This is a simplified C++ interface to fast implementations of hierarchical
clustering by Daniel Müllner. The original library with interfaces to R
and Python is described in:

Daniel Müllner: "fastcluster: Fast Hierarchical, Agglomerative Clustering
Routines for R and Python." Journal of Statistical Software 53 (2013),
no. 9, pp. 1-18, http://www.jstatsoft.org/v53/i09/


Usage of the library
--------------------

For using the library, the following source files are needed:

    fastcluster_dm.cpp, fastcluster_R_dm.cpp
      original code by Daniel Müllner
      these are included by fastcluster.cpp via #include, and therefore
      need not be compiled to object code

    fastcluster.[h|cpp]
      simplified C++ interface
      fastcluster.cpp is the only file that must be compiled

The library provides the clustering function *hclust_fast* for
creating the dendrogram information in an encoding as used by the
R function *hclust*. For a description of the parameters, see fastcluster.h.
Its parameter *method* can be one of

    HCLUST_METHOD_SINGLE
      single link with the minimum spanning tree algorithm (Rohlf, 1973)

    HCLUST_METHOD_COMPLETE
      complete link with the nearest-neighbor-chain algorithm (Murtagh, 1984)

    HCLUST_METHOD_AVERAGE
      average link with the nearest-neighbor-chain algorithm (Murtagh, 1984)

    HCLUST_METHOD_MEDIAN
      median link with the generic algorithm (Müllner, 2011)

For splitting the dendrogram into clusters, the two functions *cutree_k*
and *cutree_cdist* are provided.

Note that output parameters must be allocated beforehand, e.g.
    int* merge = new int[2*(npoints-1)];
For a complete usage example, see lines 135-142 of demo.cpp.


Demonstration program
---------------------

A simple demo is implemented in demo.cpp, which can be compiled and run with

    make
    ./hclust-demo -m complete lines.csv

It creates two clusters of line segments such that the segment angles between
line segments of different clusters have a maximum (cosine) dissimilarity.
For visualizing the result, plotresult.r can be used as follows
(requires R <https://r-project.org> to be installed):

    ./hclust-demo -m complete lines.csv | Rscript plotresult.r


Authors & Copyright
-------------------

Daniel Müllner, 2011, <http://danifold.net>
Christoph Dalitz, 2018, <http://www.hsnr.de/ipattern/>


License
-------

This code is provided under a BSD-style license.
See the file LICENSE for details.
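
This diff also wraps the C API for Python with cffi (see fastcluster_py.py below); a hedged sketch of driving the three-step pipeline from there, assuming that wrapper builds. Note that hclust_pdist fills squared Euclidean distances, so the cutoff passed to cutree_cdist is squared too:

import numpy as np
from selfdrive.controls.lib.cluster.fastcluster_py import hclust, ffi

pts = np.random.uniform(0, 10, (20, 3))  # 20 observables of dimension 3
n, m = pts.shape

pdist = np.zeros(n * (n - 1) / 2, dtype=np.float64)  # condensed distances
merge = np.zeros(2 * (n - 1), dtype=np.int32)        # R-style merge matrix
height = np.zeros(n - 1, dtype=np.float64)           # distance at each merge
labels = np.zeros(n, dtype=np.int32)

hclust.hclust_pdist(n, m, ffi.cast("double *", pts.ctypes.data),
                    ffi.cast("double *", pdist.ctypes.data))
hclust.hclust_fast(n, ffi.cast("double *", pdist.ctypes.data),
                   1,  # HCLUST_METHOD_COMPLETE
                   ffi.cast("int *", merge.ctypes.data),
                   ffi.cast("double *", height.ctypes.data))
hclust.cutree_cdist(n, ffi.cast("int *", merge.ctypes.data),
                    ffi.cast("double *", height.ctypes.data),
                    2.5**2,  # squared, to match hclust_pdist
                    ffi.cast("int *", labels.ctypes.data))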
@@ -0,0 +1,218 @@
//
// C++ standalone version of fastcluster by Daniel Müllner
//
// Copyright: Christoph Dalitz, 2018
//            Daniel Müllner, 2011
// License:   BSD style license
//            (see the file LICENSE for details)
//


#include <vector>
#include <algorithm>
#include <cmath>


extern "C" {
#include "fastcluster.h"
}

// Code by Daniel Müllner
// workaround to make it usable as a standalone version (without R)
bool fc_isnan(double x) { return false; }
#include "fastcluster_dm.cpp"
#include "fastcluster_R_dm.cpp"

extern "C" {
//
// Assigns cluster labels (0, ..., nclust-1) to the n points such
// that the cluster result is split into nclust clusters.
//
// Input arguments:
//   n      = number of observables
//   merge  = clustering result in R format
//   nclust = number of clusters
// Output arguments:
//   labels = allocated integer array of size n for result
//
void cutree_k(int n, const int* merge, int nclust, int* labels) {

  int k,m1,m2,j,l;

  if (nclust > n || nclust < 2) {
    for (j=0; j<n; j++) labels[j] = 0;
    return;
  }

  // assign to each observable the number of its last merge step
  // beware: indices of observables in merge start at 1 (R convention)
  std::vector<int> last_merge(n, 0);
  for (k=1; k<=(n-nclust); k++) {
    // (m1,m2) = merge[k,]
    m1 = merge[k-1];
    m2 = merge[n-1+k-1];
    if (m1 < 0 && m2 < 0) { // both single observables
      last_merge[-m1-1] = last_merge[-m2-1] = k;
    }
    else if (m1 < 0 || m2 < 0) { // one is a cluster
      if(m1 < 0) { j = -m1; m1 = m2; } else j = -m2;
      // merging single observable and cluster
      for(l = 0; l < n; l++)
        if (last_merge[l] == m1)
          last_merge[l] = k;
      last_merge[j-1] = k;
    }
    else { // both cluster
      for(l=0; l < n; l++) {
        if( last_merge[l] == m1 || last_merge[l] == m2 )
          last_merge[l] = k;
      }
    }
  }

  // assign cluster labels
  int label = 0;
  std::vector<int> z(n,-1);
  for (j=0; j<n; j++) {
    if (last_merge[j] == 0) { // still singleton
      labels[j] = label++;
    } else {
      if (z[last_merge[j]] < 0) {
        z[last_merge[j]] = label++;
      }
      labels[j] = z[last_merge[j]];
    }
  }
}

//
// Assigns cluster labels (0, ..., nclust-1) to the n points such
// that the hierarchical clustering is stopped when cluster distance >= cdist
//
// Input arguments:
//   n      = number of observables
//   merge  = clustering result in R format
//   height = cluster distance at each merge step
//   cdist  = cutoff cluster distance
// Output arguments:
//   labels = allocated integer array of size n for result
//
void cutree_cdist(int n, const int* merge, double* height, double cdist, int* labels) {

  int k;

  for (k=0; k<(n-1); k++) {
    if (height[k] >= cdist) {
      break;
    }
  }
  cutree_k(n, merge, n-k, labels);
}


//
// Hierarchical clustering with one of Daniel Müllner's fast algorithms
//
// Input arguments:
//   n       = number of observables
//   distmat = condensed distance matrix, i.e. an n*(n-1)/2 array representing
//             the upper triangle (without diagonal elements) of the distance
//             matrix, e.g. for n=4:
//               d00 d01 d02 d03
//               d10 d11 d12 d13   ->  d01 d02 d03 d12 d13 d23
//               d20 d21 d22 d23
//               d30 d31 d32 d33
//   method  = cluster metric (see enum method_code)
// Output arguments:
//   merge   = allocated (n-1)x2 matrix (2*(n-1) array) for storing result.
//             Result follows R hclust convention:
//              - observable indices start with one
//              - merge[i][] contains the merged nodes in step i
//              - merge[i][j] is negative when the node is an atom
//   height  = allocated (n-1) array with distances at each merge step
// Return code:
//   0 = ok
//   1 = invalid method
//
int hclust_fast(int n, double* distmat, int method, int* merge, double* height) {

  // call appropriate clustering function
  cluster_result Z2(n-1);
  if (method == HCLUST_METHOD_SINGLE) {
    // single link
    MST_linkage_core(n, distmat, Z2);
  }
  else if (method == HCLUST_METHOD_COMPLETE) {
    // complete link
    NN_chain_core<METHOD_METR_COMPLETE, t_float>(n, distmat, NULL, Z2);
  }
  else if (method == HCLUST_METHOD_AVERAGE) {
    // best average distance
    double* members = new double[n];
    for (int i=0; i<n; i++) members[i] = 1;
    NN_chain_core<METHOD_METR_AVERAGE, t_float>(n, distmat, members, Z2);
    delete[] members;
  }
  else if (method == HCLUST_METHOD_MEDIAN) {
    // best median distance (beware: O(n^3))
    generic_linkage<METHOD_METR_MEDIAN, t_float>(n, distmat, NULL, Z2);
  }
  else if (method == HCLUST_METHOD_CENTROID) {
    // best centroid distance (beware: O(n^3))
    double* members = new double[n];
    for (int i=0; i<n; i++) members[i] = 1;
    generic_linkage<METHOD_METR_CENTROID, t_float>(n, distmat, members, Z2);
    delete[] members;
  }
  else {
    return 1;
  }

  int* order = new int[n];
  if (method == HCLUST_METHOD_MEDIAN || method == HCLUST_METHOD_CENTROID) {
    generate_R_dendrogram<true>(merge, height, order, Z2, n);
  } else {
    generate_R_dendrogram<false>(merge, height, order, Z2, n);
  }
  delete[] order; // only needed for visualization

  return 0;
}


// Build condensed distance matrix
// Input arguments:
//   n   = number of observables
//   m   = dimension of observable
// Output arguments:
//   out = allocated double array of size n * (n - 1) / 2 for result
void hclust_pdist(int n, int m, double* pts, double* out) {
  int ii = 0;
  for (int i = 0; i < n; i++){
    for (int j = i + 1; j < n; j++){
      // Compute squared Euclidean distance
      double d = 0;
      for (int k = 0; k < m; k ++){
        double error = pts[i * m + k] - pts[j * m + k];
        d += (error * error);
      }
      out[ii] = d; //sqrt(d);
      ii++;
    }
  }
}

void cluster_points_centroid(int n, int m, double* pts, double dist, int* idx) {
  double* pdist = new double[n * (n - 1) / 2];
  int* merge = new int[2 * (n - 1)];
  double* height = new double[n - 1];

  hclust_pdist(n, m, pts, pdist);
  hclust_fast(n, pdist, HCLUST_METHOD_CENTROID, merge, height);
  cutree_cdist(n, merge, height, dist, idx);

  delete[] pdist;
  delete[] merge;
  delete[] height;
}
}
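
The condensed layout documented above maps each pair (i, j) with i < j to a linear index in row-major upper-triangle order; a small sketch that reproduces hclust_pdist's iteration order:

def condensed_indices(n):
  # Same double loop as hclust_pdist above.
  order = []
  for i in range(n):
    for j in range(i + 1, n):
      order.append((i, j))
  return order

# For n=4 this is exactly the order from the comment:
# d01 d02 d03 d12 d13 d23
assert condensed_indices(4) == [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]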
@@ -0,0 +1,77 @@
//
// C++ standalone version of fastcluster by Daniel Müllner
//
// Copyright: Daniel Müllner, 2011
//            Christoph Dalitz, 2018
// License:   BSD style license
//            (see the file LICENSE for details)
//

#ifndef fastclustercpp_H
#define fastclustercpp_H

//
// Assigns cluster labels (0, ..., nclust-1) to the n points such
// that the cluster result is split into nclust clusters.
//
// Input arguments:
//   n      = number of observables
//   merge  = clustering result in R format
//   nclust = number of clusters
// Output arguments:
//   labels = allocated integer array of size n for result
//
void cutree_k(int n, const int* merge, int nclust, int* labels);

//
// Assigns cluster labels (0, ..., nclust-1) to the n points such
// that the hierarchical clustering is stopped at cluster distance cdist
//
// Input arguments:
//   n      = number of observables
//   merge  = clustering result in R format
//   height = cluster distance at each merge step
//   cdist  = cutoff cluster distance
// Output arguments:
//   labels = allocated integer array of size n for result
//
void cutree_cdist(int n, const int* merge, double* height, double cdist, int* labels);

//
// Hierarchical clustering with one of Daniel Müllner's fast algorithms
//
// Input arguments:
//   n       = number of observables
//   distmat = condensed distance matrix, i.e. an n*(n-1)/2 array representing
//             the upper triangle (without diagonal elements) of the distance
//             matrix, e.g. for n=4:
//               d00 d01 d02 d03
//               d10 d11 d12 d13   ->  d01 d02 d03 d12 d13 d23
//               d20 d21 d22 d23
//               d30 d31 d32 d33
//   method  = cluster metric (see enum method_code)
// Output arguments:
//   merge   = allocated (n-1)x2 matrix (2*(n-1) array) for storing result.
//             Result follows R hclust convention:
//              - observable indices start with one
//              - merge[i][] contains the merged nodes in step i
//              - merge[i][j] is negative when the node is an atom
//   height  = allocated (n-1) array with distances at each merge step
// Return code:
//   0 = ok
//   1 = invalid method
//
int hclust_fast(int n, double* distmat, int method, int* merge, double* height);
enum hclust_fast_methods {
  HCLUST_METHOD_SINGLE = 0,
  HCLUST_METHOD_COMPLETE = 1,
  HCLUST_METHOD_AVERAGE = 2,
  HCLUST_METHOD_MEDIAN = 3,
  HCLUST_METHOD_CENTROID = 5,
};

void hclust_pdist(int n, int m, double* pts, double* out);
void cluster_points_centroid(int n, int m, double* pts, double dist, int* idx);


#endif
@@ -0,0 +1,115 @@
//
// Excerpt from fastcluster_R.cpp
//
// Copyright: Daniel Müllner, 2011 <http://danifold.net>
//

struct pos_node {
  t_index pos;
  int node;
};

void order_nodes(const int N, const int * const merge, const t_index * const node_size, int * const order) {
  /* Parameters:
     N         : number of data points
     merge     : (N-1)×2 array which specifies the node indices which are
                 merged in each step of the clustering procedure.
                 Negative entries -1...-N point to singleton nodes, while
                 positive entries 1...(N-1) point to nodes which are themselves
                 parents of other nodes.
     node_size : array of node sizes - makes it easier
     order     : output array of size N

     Runtime: Θ(N)
  */
  auto_array_ptr<pos_node> queue(N/2);

  int parent;
  int child;
  t_index pos = 0;

  queue[0].pos = 0;
  queue[0].node = N-2;
  t_index idx = 1;

  do {
    --idx;
    pos = queue[idx].pos;
    parent = queue[idx].node;

    // First child
    child = merge[parent];
    if (child<0) { // singleton node, write this into the 'order' array.
      order[pos] = -child;
      ++pos;
    }
    else { /* compound node: put it on top of the queue and decompose it
              in a later iteration. */
      queue[idx].pos = pos;
      queue[idx].node = child-1; // convert index-1 based to index-0 based
      ++idx;
      pos += node_size[child-1];
    }
    // Second child
    child = merge[parent+N-1];
    if (child<0) {
      order[pos] = -child;
    }
    else {
      queue[idx].pos = pos;
      queue[idx].node = child-1;
      ++idx;
    }
  } while (idx>0);
}

#define size_(r_) ( ((r_<N) ? 1 : node_size[r_-N]) )

template <const bool sorted>
void generate_R_dendrogram(int * const merge, double * const height, int * const order, cluster_result & Z2, const int N) {
  // The array "nodes" is a union-find data structure for the cluster
  // identities (only needed for unsorted cluster_result input).
  union_find nodes(sorted ? 0 : N);
  if (!sorted) {
    std::stable_sort(Z2[0], Z2[N-1]);
  }

  t_index node1, node2;
  auto_array_ptr<t_index> node_size(N-1);

  for (t_index i=0; i<N-1; ++i) {
    // Get two data points whose clusters are merged in step i.
    // Find the cluster identifiers for these points.
    if (sorted) {
      node1 = Z2[i]->node1;
      node2 = Z2[i]->node2;
    }
    else {
      node1 = nodes.Find(Z2[i]->node1);
      node2 = nodes.Find(Z2[i]->node2);
      // Merge the nodes in the union-find data structure by making them
      // children of a new node.
      nodes.Union(node1, node2);
    }
    // Sort the nodes in the output array.
    if (node1>node2) {
      t_index tmp = node1;
      node1 = node2;
      node2 = tmp;
    }
    /* Conversion between labeling conventions.
       Input:  singleton nodes 0,...,N-1
               compound nodes  N,...,2N-2
       Output: singleton nodes -1,...,-N
               compound nodes  1,...,N
    */
    merge[i]     = (node1<N) ? -static_cast<int>(node1)-1
                             : static_cast<int>(node1)-N+1;
    merge[i+N-1] = (node2<N) ? -static_cast<int>(node2)-1
                             : static_cast<int>(node2)-N+1;
    height[i] = Z2[i]->dist;
    node_size[i] = size_(node1) + size_(node2);
  }

  order_nodes(N, merge, node_size, order);
}
(File diff suppressed because it is too large)
@@ -0,0 +1,30 @@
import os
import numpy as np

from cffi import FFI
import subprocess

cluster_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
subprocess.check_call(["make", "-j4"], cwd=cluster_dir)

cluster_fn = os.path.join(cluster_dir, "libfastcluster.so")

ffi = FFI()
ffi.cdef("""
int hclust_fast(int n, double* distmat, int method, int* merge, double* height);
void cutree_cdist(int n, const int* merge, double* height, double cdist, int* labels);
void hclust_pdist(int n, int m, double* pts, double* out);
void cluster_points_centroid(int n, int m, double* pts, double dist, int* idx);
""")

hclust = ffi.dlopen(cluster_fn)


def cluster_points_centroid(pts, dist):
  pts = np.ascontiguousarray(pts, dtype=np.float64)
  pts_ptr = ffi.cast("double *", pts.ctypes.data)
  n, m = pts.shape

  labels_ptr = ffi.new("int[]", n)
  hclust.cluster_points_centroid(n, m, pts_ptr, dist**2, labels_ptr)
  return list(labels_ptr)
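
Usage then reduces to one call; a hedged sketch with made-up points (exact label numbering depends on merge order):

import numpy as np
from selfdrive.controls.lib.cluster.fastcluster_py import cluster_points_centroid

pts = np.array([[0.0, 0.0, 0.0],   # three nearby points ...
                [0.1, 0.0, 0.0],
                [0.0, 0.2, 0.0],
                [9.0, 9.0, 9.0]])  # ... and one far outlier
labels = cluster_points_centroid(pts, 2.5)  # threshold 2.5 (squared internally)
print labels  # e.g. [0, 0, 0, 1]: the first three merge, the outlier stands alone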
@@ -0,0 +1,35 @@
#include <cassert>

extern "C" {
#include "fastcluster.h"
}


int main(int argc, const char* argv[]){
  const int n = 11;
  const int m = 3;
  double* pts = new double[n*m]{59.26000137, -9.35999966, -5.42500019,
                                91.61999817, -0.31999999, -2.75,
                                31.38000031, 0.40000001, -0.2,
                                89.57999725, -8.07999992, -18.04999924,
                                53.42000122, 0.63999999, -0.175,
                                31.38000031, 0.47999999, -0.2,
                                36.33999939, 0.16, -0.2,
                                53.33999939, 0.95999998, -0.175,
                                59.26000137, -9.76000023, -5.44999981,
                                33.93999977, 0.40000001, -0.22499999,
                                106.74000092, -5.76000023, -18.04999924};

  int * idx = new int[n];
  int * correct_idx = new int[n]{0, 1, 2, 3, 4, 2, 5, 4, 0, 5, 6};

  cluster_points_centroid(n, m, pts, 2.5 * 2.5, idx);

  for (int i = 0; i < n; i++){
    assert(idx[i] == correct_idx[i]);
  }

  delete[] idx;
  delete[] correct_idx;
  delete[] pts;
}
@@ -0,0 +1,138 @@
import time
import unittest
import numpy as np
from fastcluster import linkage_vector
from scipy.cluster import _hierarchy
from scipy.spatial.distance import pdist

from selfdrive.controls.lib.cluster.fastcluster_py import hclust, ffi
from selfdrive.controls.lib.cluster.fastcluster_py import cluster_points_centroid


def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
  # supersimplified function to get fast clustering. Got it from scipy
  Z = np.asarray(Z, order='c')
  n = Z.shape[0] + 1
  T = np.zeros((n,), dtype='i')
  _hierarchy.cluster_dist(Z, T, float(t), int(n))
  return T


TRACK_PTS = np.array([[59.26000137, -9.35999966, -5.42500019],
                      [91.61999817, -0.31999999, -2.75],
                      [31.38000031, 0.40000001, -0.2],
                      [89.57999725, -8.07999992, -18.04999924],
                      [53.42000122, 0.63999999, -0.175],
                      [31.38000031, 0.47999999, -0.2],
                      [36.33999939, 0.16, -0.2],
                      [53.33999939, 0.95999998, -0.175],
                      [59.26000137, -9.76000023, -5.44999981],
                      [33.93999977, 0.40000001, -0.22499999],
                      [106.74000092, -5.76000023, -18.04999924]])

CORRECT_LINK = np.array([[2., 5., 0.07999998, 2.],
                         [4., 7., 0.32984889, 2.],
                         [0., 8., 0.40078104, 2.],
                         [6., 9., 2.41209933, 2.],
                         [11., 14., 3.76342275, 4.],
                         [12., 13., 13.02297651, 4.],
                         [1., 3., 17.27626057, 2.],
                         [10., 17., 17.92918845, 3.],
                         [15., 16., 23.68525366, 8.],
                         [18., 19., 52.52351319, 11.]])

CORRECT_LABELS = np.array([7, 1, 4, 2, 6, 4, 5, 6, 7, 5, 3], dtype=np.int32)


def plot_cluster(pts, idx_old, idx_new):
  import matplotlib.pyplot as plt
  m = 'Set1'

  plt.figure()
  plt.subplot(1, 2, 1)
  plt.scatter(pts[:, 0], pts[:, 1], c=idx_old, cmap=m)
  plt.title("Old")
  plt.colorbar()
  plt.subplot(1, 2, 2)
  plt.scatter(pts[:, 0], pts[:, 1], c=idx_new, cmap=m)
  plt.title("New")
  plt.colorbar()

  plt.show()


def same_clusters(correct, other):
  correct = np.asarray(correct)
  other = np.asarray(other)
  if len(correct) != len(other):
    return False

  for i in range(len(correct)):
    c = np.where(correct == correct[i])
    o = np.where(other == other[i])
    if not np.array_equal(c, o):
      return False
  return True


class TestClustering(unittest.TestCase):
  def test_scipy_clustering(self):
    old_link = linkage_vector(TRACK_PTS, method='centroid')
    old_cluster_idxs = fcluster(old_link, 2.5, criterion='distance')

    np.testing.assert_allclose(old_link, CORRECT_LINK)
    np.testing.assert_allclose(old_cluster_idxs, CORRECT_LABELS)

  def test_pdist(self):
    pts = np.ascontiguousarray(TRACK_PTS, dtype=np.float64)
    pts_ptr = ffi.cast("double *", pts.ctypes.data)

    n, m = pts.shape
    out = np.zeros((n * (n - 1) / 2, ), dtype=np.float64)
    out_ptr = ffi.cast("double *", out.ctypes.data)
    hclust.hclust_pdist(n, m, pts_ptr, out_ptr)

    np.testing.assert_allclose(out, np.power(pdist(TRACK_PTS), 2))

  def test_cpp_clustering(self):
    pts = np.ascontiguousarray(TRACK_PTS, dtype=np.float64)
    pts_ptr = ffi.cast("double *", pts.ctypes.data)
    n, m = pts.shape

    labels = np.zeros((n, ), dtype=np.int32)
    labels_ptr = ffi.cast("int *", labels.ctypes.data)
    hclust.cluster_points_centroid(n, m, pts_ptr, 2.5**2, labels_ptr)
    self.assertTrue(same_clusters(CORRECT_LABELS, labels))

  def test_cpp_wrapper_clustering(self):
    labels = cluster_points_centroid(TRACK_PTS, 2.5)
    self.assertTrue(same_clusters(CORRECT_LABELS, labels))

  def test_random_cluster(self):
    np.random.seed(1337)
    N = 1000

    t_old = 0.
    t_new = 0.

    for _ in range(N):
      n = int(np.random.uniform(2, 32))
      x = np.random.uniform(-10, 50, (n, 1))
      y = np.random.uniform(-5, 5, (n, 1))
      vrel = np.random.uniform(-5, 5, (n, 1))
      pts = np.hstack([x, y, vrel])

      t = time.time()
      old_link = linkage_vector(pts, method='centroid')
      old_cluster_idx = fcluster(old_link, 2.5, criterion='distance')
      t_old += time.time() - t

      t = time.time()
      cluster_idx = cluster_points_centroid(pts, 2.5)
      t_new += time.time() - t

      self.assertTrue(same_clusters(old_cluster_idx, cluster_idx))


if __name__ == "__main__":
  unittest.main()
@@ -0,0 +1,21 @@
import numpy as np
import os


def gen_chi2_ppf_lookup(max_dim=200):
  from scipy.stats import chi2
  table = np.zeros((max_dim, 98))
  for dim in range(1,max_dim):
    table[dim] = chi2.ppf(np.arange(.01, .99, .01), dim)
  #outfile = open('chi2_lookup_table', 'w')
  np.save('chi2_lookup_table', table)


def chi2_ppf(p, dim):
  table = np.load(os.path.dirname(os.path.realpath(__file__)) + '/chi2_lookup_table.npy')
  result = np.interp(p, np.arange(.01, .99, .01), table[dim])
  return result


if __name__== "__main__":
  gen_chi2_ppf_lookup()
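
A hedged check of the lookup (the import path is a guess; adjust to wherever this module lives, and it requires scipy plus the generated chi2_lookup_table.npy). The table holds quantiles at p = 0.01 ... 0.98, and chi2_ppf linearly interpolates between them, so grid points agree with scipy to tiny interpolation error:

from scipy.stats import chi2
from chi2_lookup import chi2_ppf  # hypothetical import path

p, dim = 0.95, 3
approx = chi2_ppf(p, dim)  # interpolated from the table
exact = chi2.ppf(p, dim)   # about 7.815 for dim=3
assert abs(approx - exact) < 1e-3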
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0cc301b3a918cce76bd119a219e31722c6a03fa837927eb10fd84e877966cc3e
size 156928
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ac05170d2fbe9f5b65080edbb77cf408a1c90099575c1a3409d0a1ef75cd7a24
+oid sha256:f340b8fa3d09329639271febcec3ba0c372e540a98ef47f7454811ddd575c253
 size 1703704
@@ -1,106 +0,0 @@
{
  "_comment": "These speeds are from https://wiki.openstreetmap.org/wiki/Speed_limits Special cases have been stripped",
  "AR:urban": "40",
  "AR:urban:primary": "60",
  "AR:urban:secondary": "60",
  "AR:rural": "110",
  "AT:urban": "50",
  "AT:rural": "100",
  "AT:trunk": "100",
  "AT:motorway": "130",
  "BE:urban": "50",
  "BE-VLG:rural": "70",
  "BE-WAL:rural": "90",
  "BE:trunk": "120",
  "BE:motorway": "120",
  "CH:urban[1]": "50",
  "CH:rural": "80",
  "CH:trunk": "100",
  "CH:motorway": "120",
  "CZ:pedestrian_zone": "20",
  "CZ:living_street": "20",
  "CZ:urban": "50",
  "CZ:urban_trunk": "80",
  "CZ:urban_motorway": "80",
  "CZ:rural": "90",
  "CZ:trunk": "110",
  "CZ:motorway": "130",
  "DK:urban": "50",
  "DK:rural": "80",
  "DK:motorway": "130",
  "DE:living_street": "7",
  "DE:residential": "30",
  "DE:urban": "50",
  "DE:rural": "100",
  "DE:trunk": "none",
  "DE:motorway": "none",
  "FI:urban": "50",
  "FI:rural": "80",
  "FI:trunk": "100",
  "FI:motorway": "120",
  "FR:urban": "50",
  "FR:rural": "80",
  "FR:trunk": "110",
  "FR:motorway": "130",
  "GR:urban": "50",
  "GR:rural": "90",
  "GR:trunk": "110",
  "GR:motorway": "130",
  "HU:urban": "50",
  "HU:rural": "90",
  "HU:trunk": "110",
  "HU:motorway": "130",
  "IT:urban": "50",
  "IT:rural": "90",
  "IT:trunk": "110",
  "IT:motorway": "130",
  "JP:national": "60",
  "JP:motorway": "100",
  "LT:living_street": "20",
  "LT:urban": "50",
  "LT:rural": "90",
  "LT:trunk": "120",
  "LT:motorway": "130",
  "PL:living_street": "20",
  "PL:urban": "50",
  "PL:rural": "90",
  "PL:trunk": "100",
  "PL:motorway": "140",
  "RO:urban": "50",
  "RO:rural": "90",
  "RO:trunk": "100",
  "RO:motorway": "130",
  "RU:living_street": "20",
  "RU:urban": "60",
  "RU:rural": "90",
  "RU:motorway": "110",
  "SK:urban": "50",
  "SK:rural": "90",
  "SK:trunk": "90",
  "SK:motorway": "90",
  "SI:urban": "50",
  "SI:rural": "90",
  "SI:trunk": "110",
  "SI:motorway": "130",
  "ES:living_street": "20",
  "ES:urban": "50",
  "ES:rural": "50",
  "ES:trunk": "90",
  "ES:motorway": "120",
  "SE:urban": "50",
  "SE:rural": "70",
  "SE:trunk": "90",
  "SE:motorway": "110",
  "GB:nsl_restricted": "30 mph",
  "GB:nsl_single": "60 mph",
  "GB:nsl_dual": "70 mph",
  "GB:motorway": "70 mph",
  "UA:urban": "50",
  "UA:rural": "90",
  "UA:trunk": "110",
  "UA:motorway": "130",
  "UZ:living_street": "30",
  "UZ:urban": "70",
  "UZ:rural": "100",
  "UZ:motorway": "110"
}
@@ -1,240 +0,0 @@
#!/usr/bin/env python
import json

DEFAULT_OUTPUT_FILENAME = "default_speeds_by_region.json"

def main(filename = DEFAULT_OUTPUT_FILENAME):
  countries = []

  """
  --------------------------------------------------
  US - United States of America
  --------------------------------------------------
  """
  US = Country("US")  # First step, create the country using the ISO 3166 two letter code
  countries.append(US)  # Second step, add the country to countries list

  """ Default rules """
  # Third step, add some default rules for the country
  # Speed limit rules are based on OpenStreetMaps (OSM) tags.
  # The dictionary {...} defines the tag_name: value
  # if a road in OSM has a tag with the name tag_name and this value, the speed limit listed below will be applied.
  # The text at the end is the speed limit (use no unit for km/h)
  # Rules apply in the order in which they are written for each country
  # Rules for specific regions (states) take priority over country rules
  # If you modify existing country rules, you must update all existing states without that rule to use the old rule
  US.add_rule({"highway": "motorway"}, "65 mph")  # On US roads with the tag highway and value motorway, the speed limit will default to 65 mph
  US.add_rule({"highway": "trunk"}, "55 mph")
  US.add_rule({"highway": "primary"}, "55 mph")
  US.add_rule({"highway": "secondary"}, "45 mph")
  US.add_rule({"highway": "tertiary"}, "35 mph")
  US.add_rule({"highway": "unclassified"}, "55 mph")
  US.add_rule({"highway": "residential"}, "25 mph")
  US.add_rule({"highway": "service"}, "25 mph")
  US.add_rule({"highway": "motorway_link"}, "55 mph")
  US.add_rule({"highway": "trunk_link"}, "55 mph")
  US.add_rule({"highway": "primary_link"}, "55 mph")
  US.add_rule({"highway": "secondary_link"}, "45 mph")
  US.add_rule({"highway": "tertiary_link"}, "35 mph")
  US.add_rule({"highway": "living_street"}, "15 mph")

  """ States """
  new_york = US.add_region("New York")  # Fourth step, add a state/region to country
  new_york.add_rule({"highway": "primary"}, "45 mph")  # Fifth step, add rules to the state. See the text above for how to write rules
  new_york.add_rule({"highway": "secondary"}, "55 mph")
  new_york.add_rule({"highway": "tertiary"}, "55 mph")
  new_york.add_rule({"highway": "residential"}, "30 mph")
  new_york.add_rule({"highway": "primary_link"}, "45 mph")
  new_york.add_rule({"highway": "secondary_link"}, "55 mph")
  new_york.add_rule({"highway": "tertiary_link"}, "55 mph")
  # If a rule is not written by the state, it defaults to the country rules

  #california = US.add_region("California")
  # California uses only the default US rules

  michigan = US.add_region("Michigan")
  michigan.add_rule({"highway": "motorway"}, "70 mph")

  oregon = US.add_region("Oregon")
  oregon.add_rule({"highway": "motorway"}, "55 mph")
  oregon.add_rule({"highway": "secondary"}, "35 mph")
  oregon.add_rule({"highway": "tertiary"}, "30 mph")
  oregon.add_rule({"highway": "service"}, "15 mph")
  oregon.add_rule({"highway": "secondary_link"}, "35 mph")
  oregon.add_rule({"highway": "tertiary_link"}, "30 mph")

  south_dakota = US.add_region("South Dakota")
  south_dakota.add_rule({"highway": "motorway"}, "80 mph")
  south_dakota.add_rule({"highway": "trunk"}, "70 mph")
  south_dakota.add_rule({"highway": "primary"}, "65 mph")
  south_dakota.add_rule({"highway": "trunk_link"}, "70 mph")
  south_dakota.add_rule({"highway": "primary_link"}, "65 mph")

  wisconsin = US.add_region("Wisconsin")
  wisconsin.add_rule({"highway": "trunk"}, "65 mph")
  wisconsin.add_rule({"highway": "tertiary"}, "45 mph")
  wisconsin.add_rule({"highway": "unclassified"}, "35 mph")
  wisconsin.add_rule({"highway": "trunk_link"}, "65 mph")
  wisconsin.add_rule({"highway": "tertiary_link"}, "45 mph")

  """
  --------------------------------------------------
  AU - Australia
  --------------------------------------------------
  """
  AU = Country("AU")
  countries.append(AU)

  """ Default rules """
  AU.add_rule({"highway": "motorway"}, "100")
  AU.add_rule({"highway": "trunk"}, "80")
  AU.add_rule({"highway": "primary"}, "80")
  AU.add_rule({"highway": "secondary"}, "50")
  AU.add_rule({"highway": "tertiary"}, "50")
  AU.add_rule({"highway": "unclassified"}, "80")
  AU.add_rule({"highway": "residential"}, "50")
  AU.add_rule({"highway": "service"}, "40")
  AU.add_rule({"highway": "motorway_link"}, "90")
  AU.add_rule({"highway": "trunk_link"}, "80")
  AU.add_rule({"highway": "primary_link"}, "80")
  AU.add_rule({"highway": "secondary_link"}, "50")
  AU.add_rule({"highway": "tertiary_link"}, "50")
  AU.add_rule({"highway": "living_street"}, "30")

  """
  --------------------------------------------------
  CA - Canada
  --------------------------------------------------
  """
  CA = Country("CA")
  countries.append(CA)

  """ Default rules """
  CA.add_rule({"highway": "motorway"}, "100")
  CA.add_rule({"highway": "trunk"}, "80")
  CA.add_rule({"highway": "primary"}, "80")
  CA.add_rule({"highway": "secondary"}, "50")
  CA.add_rule({"highway": "tertiary"}, "50")
  CA.add_rule({"highway": "unclassified"}, "80")
  CA.add_rule({"highway": "residential"}, "40")
  CA.add_rule({"highway": "service"}, "40")
  CA.add_rule({"highway": "motorway_link"}, "90")
  CA.add_rule({"highway": "trunk_link"}, "80")
  CA.add_rule({"highway": "primary_link"}, "80")
  CA.add_rule({"highway": "secondary_link"}, "50")
  CA.add_rule({"highway": "tertiary_link"}, "50")
  CA.add_rule({"highway": "living_street"}, "20")


  """
  --------------------------------------------------
  DE - Germany
  --------------------------------------------------
  """
  DE = Country("DE")
  countries.append(DE)

  """ Default rules """
  DE.add_rule({"highway": "motorway"}, "none")
  DE.add_rule({"highway": "living_street"}, "10")
  DE.add_rule({"highway": "residential"}, "30")
  DE.add_rule({"zone:traffic": "DE:rural"}, "100")
  DE.add_rule({"zone:traffic": "DE:urban"}, "50")
  DE.add_rule({"zone:maxspeed": "DE:30"}, "30")
  DE.add_rule({"zone:maxspeed": "DE:urban"}, "50")
  DE.add_rule({"zone:maxspeed": "DE:rural"}, "100")
  DE.add_rule({"zone:maxspeed": "DE:motorway"}, "none")
  DE.add_rule({"bicycle_road": "yes"}, "30")


  """
  --------------------------------------------------
  EE - Estonia
  --------------------------------------------------
  """
  EE = Country("EE")
  countries.append(EE)

  """ Default rules """
  EE.add_rule({"highway": "motorway"}, "90")
  EE.add_rule({"highway": "trunk"}, "90")
  EE.add_rule({"highway": "primary"}, "90")
  EE.add_rule({"highway": "secondary"}, "50")
  EE.add_rule({"highway": "tertiary"}, "50")
  EE.add_rule({"highway": "unclassified"}, "90")
  EE.add_rule({"highway": "residential"}, "40")
  EE.add_rule({"highway": "service"}, "40")
  EE.add_rule({"highway": "motorway_link"}, "90")
  EE.add_rule({"highway": "trunk_link"}, "70")
  EE.add_rule({"highway": "primary_link"}, "70")
  EE.add_rule({"highway": "secondary_link"}, "50")
  EE.add_rule({"highway": "tertiary_link"}, "50")
  EE.add_rule({"highway": "living_street"}, "20")


  """ --- DO NOT MODIFY CODE BELOW THIS LINE --- """
  """ --- ADD YOUR COUNTRY OR STATE ABOVE --- """

  # Final step
  write_json(countries, filename)

def write_json(countries, filename = DEFAULT_OUTPUT_FILENAME):
  out_dict = {}
  for country in countries:
    out_dict.update(country.jsonify())
  json_string = json.dumps(out_dict, indent=2)
  with open(filename, "wb") as f:
    f.write(json_string)


class Region(object):
  ALLOWABLE_TAG_KEYS = ["highway", "zone:traffic", "bicycle_road", "zone:maxspeed"]
  ALLOWABLE_HIGHWAY_TYPES = ["motorway", "trunk", "primary", "secondary", "tertiary", "unclassified", "residential", "service", "motorway_link", "trunk_link", "primary_link", "secondary_link", "tertiary_link", "living_street"]
  def __init__(self, name):
    self.name = name
    self.rules = []

  def add_rule(self, tag_conditions, speed):
    new_rule = {}
    if not isinstance(tag_conditions, dict):
      raise TypeError("Rule tag conditions must be dictionary")
    if not all(tag_key in self.ALLOWABLE_TAG_KEYS for tag_key in tag_conditions):
      raise ValueError("Rule tag keys must be in allowable tag keys")  # If this is by mistake, please update ALLOWABLE_TAG_KEYS
    if 'highway' in tag_conditions:
      if not tag_conditions['highway'] in self.ALLOWABLE_HIGHWAY_TYPES:
        raise ValueError("Invalid Highway type {}".format(tag_conditions["highway"]))
    new_rule['tags'] = tag_conditions
    try:
      new_rule['speed'] = str(speed)
    except ValueError:
      raise ValueError("Rule speed must be string")
    self.rules.append(new_rule)

  def jsonify(self):
    ret_dict = {}
    ret_dict[self.name] = self.rules
    return ret_dict

class Country(Region):
  ALLOWABLE_COUNTRY_CODES = ["AF","AX","AL","DZ","AS","AD","AO","AI","AQ","AG","AR","AM","AW","AU","AT","AZ","BS","BH","BD","BB","BY","BE","BZ","BJ","BM","BT","BO","BQ","BA","BW","BV","BR","IO","BN","BG","BF","BI","KH","CM","CA","CV","KY","CF","TD","CL","CN","CX","CC","CO","KM","CG","CD","CK","CR","CI","HR","CU","CW","CY","CZ","DK","DJ","DM","DO","EC","EG","SV","GQ","ER","EE","ET","FK","FO","FJ","FI","FR","GF","PF","TF","GA","GM","GE","DE","GH","GI","GR","GL","GD","GP","GU","GT","GG","GN","GW","GY","HT","HM","VA","HN","HK","HU","IS","IN","ID","IR","IQ","IE","IM","IL","IT","JM","JP","JE","JO","KZ","KE","KI","KP","KR","KW","KG","LA","LV","LB","LS","LR","LY","LI","LT","LU","MO","MK","MG","MW","MY","MV","ML","MT","MH","MQ","MR","MU","YT","MX","FM","MD","MC","MN","ME","MS","MA","MZ","MM","NA","NR","NP","NL","NC","NZ","NI","NE","NG","NU","NF","MP","NO","OM","PK","PW","PS","PA","PG","PY","PE","PH","PN","PL","PT","PR","QA","RE","RO","RU","RW","BL","SH","KN","LC","MF","PM","VC","WS","SM","ST","SA","SN","RS","SC","SL","SG","SX","SK","SI","SB","SO","ZA","GS","SS","ES","LK","SD","SR","SJ","SZ","SE","CH","SY","TW","TJ","TZ","TH","TL","TG","TK","TO","TT","TN","TR","TM","TC","TV","UG","UA","AE","GB","US","UM","UY","UZ","VU","VE","VN","VG","VI","WF","EH","YE","ZM","ZW"]
  def __init__(self, ISO_3166_alpha_2):
    Region.__init__(self, ISO_3166_alpha_2)
    if ISO_3166_alpha_2 not in self.ALLOWABLE_COUNTRY_CODES:
      raise ValueError("Not a valid ISO 3166 country code")
    self.regions = {}

  def add_region(self, name):
    self.regions[name] = Region(name)
    return self.regions[name]

  def jsonify(self):
    ret_dict = {}
    ret_dict[self.name] = {}
    for r_name, region in self.regions.items():
      ret_dict[self.name].update(region.jsonify())
    ret_dict[self.name]['Default'] = self.rules
    return ret_dict


if __name__ == '__main__':
  main()
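For reference, a hedged sketch of the JSON shape this generator emitted, following Region.jsonify and Country.jsonify above (country, region, and speeds are illustrative):

{
  "US": {
    "New York": [
      {"tags": {"highway": "primary"}, "speed": "45 mph"}
    ],
    "Default": [
      {"tags": {"highway": "motorway"}, "speed": "65 mph"}
    ]
  }
}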
@ -1,297 +0,0 @@ |
|||||||
#!/usr/bin/env python |
|
||||||
|
|
||||||
# Add phonelibs openblas to LD_LIBRARY_PATH if import fails |
|
||||||
from common.basedir import BASEDIR |
|
||||||
try: |
|
||||||
from scipy import spatial |
|
||||||
except ImportError as e: |
|
||||||
import os |
|
||||||
import sys |
|
||||||
|
|
||||||
|
|
||||||
openblas_path = os.path.join(BASEDIR, "phonelibs/openblas/") |
|
||||||
os.environ['LD_LIBRARY_PATH'] += ':' + openblas_path |
|
||||||
|
|
||||||
args = [sys.executable] |
|
||||||
args.extend(sys.argv) |
|
||||||
os.execv(sys.executable, args) |
|
||||||
|
|
||||||
DEFAULT_SPEEDS_BY_REGION_JSON_FILE = BASEDIR + "/selfdrive/mapd/default_speeds_by_region.json" |
|
||||||
from selfdrive.mapd import default_speeds_generator |
|
||||||
default_speeds_generator.main(DEFAULT_SPEEDS_BY_REGION_JSON_FILE) |
|
||||||
|
|
||||||
import os |
|
||||||
import sys |
|
||||||
import time |
|
||||||
import zmq |
|
||||||
import threading |
|
||||||
import numpy as np |
|
||||||
import overpy |
|
||||||
from collections import defaultdict |
|
||||||
|
|
||||||
from common.params import Params |
|
||||||
from common.transformations.coordinates import geodetic2ecef |
|
||||||
from selfdrive.services import service_list |
|
||||||
import selfdrive.messaging as messaging |
|
||||||
from selfdrive.mapd.mapd_helpers import MAPS_LOOKAHEAD_DISTANCE, Way, circle_through_points |
|
||||||
import selfdrive.crash as crash |
|
||||||
from selfdrive.version import version, dirty |
|
||||||
|
|
||||||
|
|
||||||
OVERPASS_API_URL = "https://overpass.kumi.systems/api/interpreter" |
|
||||||
OVERPASS_HEADERS = { |
|
||||||
'User-Agent': 'NEOS (comma.ai)', |
|
||||||
'Accept-Encoding': 'gzip' |
|
||||||
} |
|
||||||
|
|
||||||
last_gps = None |
|
||||||
query_lock = threading.Lock() |
|
||||||
last_query_result = None |
|
||||||
last_query_pos = None |
|
||||||
cache_valid = False |
|
||||||
|
|
||||||
def build_way_query(lat, lon, radius=50): |
|
||||||
"""Builds a query to find all highways within a given radius around a point""" |
|
||||||
pos = " (around:%f,%f,%f)" % (radius, lat, lon) |
|
||||||
lat_lon = "(%f,%f)" % (lat, lon) |
|
||||||
q = """( |
|
||||||
way |
|
||||||
""" + pos + """ |
|
||||||
[highway][highway!~"^(footway|path|bridleway|steps|cycleway|construction|bus_guideway|escape)$"]; |
|
||||||
>;);out;""" + """is_in""" + lat_lon + """;area._[admin_level~"[24]"]; |
|
||||||
convert area ::id = id(), admin_level = t['admin_level'], |
|
||||||
name = t['name'], "ISO3166-1:alpha2" = t['ISO3166-1:alpha2'];out; |
|
||||||
""" |
|
||||||
return q |
|

def query_thread():
  global last_query_result, last_query_pos, cache_valid
  api = overpy.Overpass(url=OVERPASS_API_URL, headers=OVERPASS_HEADERS, timeout=10.)

  while True:
    time.sleep(1)
    if last_gps is not None:
      fix_ok = last_gps.flags & 1
      if not fix_ok:
        continue

      if last_query_pos is not None:
        cur_ecef = geodetic2ecef((last_gps.latitude, last_gps.longitude, last_gps.altitude))
        prev_ecef = geodetic2ecef((last_query_pos.latitude, last_query_pos.longitude, last_query_pos.altitude))
        dist = np.linalg.norm(cur_ecef - prev_ecef)
        if dist < 1000:  # only re-query once we have moved 1km from the last query position
          continue

        if dist > 3000:
          cache_valid = False

      q = build_way_query(last_gps.latitude, last_gps.longitude, radius=3000)
      try:
        new_result = api.query(q)

        # Build kd-tree
        nodes = []
        real_nodes = []
        node_to_way = defaultdict(list)
        location_info = {}

        for n in new_result.nodes:
          nodes.append((float(n.lat), float(n.lon), 0))
          real_nodes.append(n)

        for way in new_result.ways:
          for n in way.nodes:
            node_to_way[n.id].append(way)

        for area in new_result.areas:
          if area.tags.get('admin_level', '') == "2":
            location_info['country'] = area.tags.get('ISO3166-1:alpha2', '')
          if area.tags.get('admin_level', '') == "4":
            location_info['region'] = area.tags.get('name', '')

        nodes = np.asarray(nodes)
        nodes = geodetic2ecef(nodes)
        tree = spatial.cKDTree(nodes)

        query_lock.acquire()
        last_query_result = new_result, tree, real_nodes, node_to_way, location_info
        last_query_pos = last_gps
        cache_valid = True
        query_lock.release()

      except Exception as e:
        print(e)
        query_lock.acquire()
        last_query_result = None
        query_lock.release()
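

# Illustrative only: the tuple stored in last_query_result above is consumed by
# Way.closest() in mapd_helpers. A hypothetical standalone lookup of the
# nearest OSM node would look like this:
def _example_nearest_node(query_results, lat, lon):
  _, tree, real_nodes, node_to_way, _ = query_results
  cur_pos = geodetic2ecef((lat, lon, 0))
  idx = tree.query(cur_pos)[1]       # index of the nearest node in ECEF space
  node = real_nodes[idx]
  return node, node_to_way[node.id]  # the node and every way that passes through it

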
def mapsd_thread():
  global last_gps

  context = zmq.Context()
  gps_sock = messaging.sub_sock(context, service_list['gpsLocation'].port, conflate=True)
  gps_external_sock = messaging.sub_sock(context, service_list['gpsLocationExternal'].port, conflate=True)
  map_data_sock = messaging.pub_sock(context, service_list['liveMapData'].port)

  cur_way = None
  curvature_valid = False
  curvature = None
  upcoming_curvature = 0.
  dist_to_turn = 0.
  road_points = None

  while True:
    gps = messaging.recv_one(gps_sock)
    gps_ext = messaging.recv_one_or_none(gps_external_sock)

    if gps_ext is not None:
      gps = gps_ext.gpsLocationExternal
    else:
      gps = gps.gpsLocation

    last_gps = gps

    fix_ok = gps.flags & 1
    if not fix_ok or last_query_result is None or not cache_valid:
      cur_way = None
      curvature = None
      curvature_valid = False
      upcoming_curvature = 0.
      dist_to_turn = 0.
      road_points = None
      map_valid = False
    else:
      map_valid = True
      lat = gps.latitude
      lon = gps.longitude
      heading = gps.bearing
      speed = gps.speed

      query_lock.acquire()
      cur_way = Way.closest(last_query_result, lat, lon, heading, cur_way)
      if cur_way is not None:
        pnts, curvature_valid = cur_way.get_lookahead(lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE)

        xs = pnts[:, 0]
        ys = pnts[:, 1]
        road_points = [float(x) for x in xs], [float(y) for y in ys]

        if speed < 10:
          curvature_valid = False
        if curvature_valid and pnts.shape[0] <= 3:
          curvature_valid = False

        # The curvature is valid when at least MAPS_LOOKAHEAD_DISTANCE of road is found
        if curvature_valid:
          # Compute the curvature for each point
          with np.errstate(divide='ignore'):
            circles = [circle_through_points(*p) for p in zip(pnts, pnts[1:], pnts[2:])]
            circles = np.asarray(circles)
            radii = np.nan_to_num(circles[:, 2])
            radii[radii < 10] = np.inf
            curvature = 1. / radii

          # Index of closest point
          closest = np.argmin(np.linalg.norm(pnts, axis=1))
          dist_to_closest = pnts[closest, 0]  # We can use x distance here since it should be close

          # Compute distance along path
          dists = list()
          dists.append(0)
          for p_prev, p in zip(pnts, pnts[1:, :]):
            dists.append(dists[-1] + np.linalg.norm(p - p_prev))
          dists = np.asarray(dists)
          dists = dists - dists[closest] + dist_to_closest
          dists = dists[1:-1]

          close_idx = np.logical_and(dists > 0, dists < 500)
          dists = dists[close_idx]
          curvature = curvature[close_idx]

          if len(curvature):
            # TODO: Determine left or right turn
            curvature = np.nan_to_num(curvature)

            # Outlier rejection
            new_curvature = np.percentile(curvature, 90, interpolation='lower')

            k = 0.6
            upcoming_curvature = k * upcoming_curvature + (1 - k) * new_curvature
            in_turn_indices = curvature > 0.8 * new_curvature

            if np.any(in_turn_indices):
              dist_to_turn = np.min(dists[in_turn_indices])
            else:
              dist_to_turn = 999
          else:
            upcoming_curvature = 0.
            dist_to_turn = 999

      query_lock.release()

    dat = messaging.new_message()
    dat.init('liveMapData')

    if last_gps is not None:
      dat.liveMapData.lastGps = last_gps

    if cur_way is not None:
      dat.liveMapData.wayId = cur_way.id

      # Speed limit
      max_speed = cur_way.max_speed()
      if max_speed is not None:
        dat.liveMapData.speedLimitValid = True
        dat.liveMapData.speedLimit = max_speed

      # TODO: use the function below to anticipate upcoming speed limits
      #max_speed_ahead, max_speed_ahead_dist = cur_way.max_speed_ahead(max_speed, lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE)
      #if max_speed_ahead is not None and max_speed_ahead_dist is not None:
      #  dat.liveMapData.speedLimitAheadValid = True
      #  dat.liveMapData.speedLimitAhead = float(max_speed_ahead)
      #  dat.liveMapData.speedLimitAheadDistance = float(max_speed_ahead_dist)

      advisory_max_speed = cur_way.advisory_max_speed()
      if advisory_max_speed is not None:
        dat.liveMapData.speedAdvisoryValid = True
        dat.liveMapData.speedAdvisory = advisory_max_speed

      # Curvature
      dat.liveMapData.curvatureValid = curvature_valid
      dat.liveMapData.curvature = float(upcoming_curvature)
      dat.liveMapData.distToTurn = float(dist_to_turn)
      if road_points is not None:
        dat.liveMapData.roadX, dat.liveMapData.roadY = road_points
      if curvature is not None:
        dat.liveMapData.roadCurvatureX = [float(x) for x in dists]
        dat.liveMapData.roadCurvature = [float(x) for x in curvature]

    dat.liveMapData.mapValid = map_valid

    map_data_sock.send(dat.to_bytes())


def main(gctx=None):
  params = Params()
  dongle_id = params.get("DongleId")
  crash.bind_user(id=dongle_id)
  crash.bind_extra(version=version, dirty=dirty, is_eon=True)
  crash.install()

  main_thread = threading.Thread(target=mapsd_thread)
  main_thread.daemon = True
  main_thread.start()

  q_thread = threading.Thread(target=query_thread)
  q_thread.daemon = True
  q_thread.start()

  while True:
    time.sleep(0.1)


if __name__ == "__main__":
  main()
@ -1,364 +0,0 @@
import math
import json
import numpy as np
from datetime import datetime

from common.basedir import BASEDIR
from selfdrive.config import Conversions as CV
from common.transformations.coordinates import LocalCoord, geodetic2ecef

LOOKAHEAD_TIME = 10.
MAPS_LOOKAHEAD_DISTANCE = 50 * LOOKAHEAD_TIME

DEFAULT_SPEEDS_JSON_FILE = BASEDIR + "/selfdrive/mapd/default_speeds.json"
DEFAULT_SPEEDS = {}
with open(DEFAULT_SPEEDS_JSON_FILE, "rb") as f:
  DEFAULT_SPEEDS = json.loads(f.read())

DEFAULT_SPEEDS_BY_REGION_JSON_FILE = BASEDIR + "/selfdrive/mapd/default_speeds_by_region.json"
DEFAULT_SPEEDS_BY_REGION = {}
with open(DEFAULT_SPEEDS_BY_REGION_JSON_FILE, "rb") as f:
  DEFAULT_SPEEDS_BY_REGION = json.loads(f.read())


def circle_through_points(p1, p2, p3):
  """Fits a circle through three points
  Formulas from: http://www.ambrsoft.com/trigocalc/circle3d.htm"""
  x1, y1, _ = p1
  x2, y2, _ = p2
  x3, y3, _ = p3

  A = x1 * (y2 - y3) - y1 * (x2 - x3) + x2 * y3 - x3 * y2
  B = (x1**2 + y1**2) * (y3 - y2) + (x2**2 + y2**2) * (y1 - y3) + (x3**2 + y3**2) * (y2 - y1)
  C = (x1**2 + y1**2) * (x2 - x3) + (x2**2 + y2**2) * (x3 - x1) + (x3**2 + y3**2) * (x1 - x2)
  D = (x1**2 + y1**2) * (x3 * y2 - x2 * y3) + (x2**2 + y2**2) * (x1 * y3 - x3 * y1) + (x3**2 + y3**2) * (x2 * y1 - x1 * y2)

  return (-B / (2 * A), -C / (2 * A), np.sqrt((B**2 + C**2 - 4 * A * D) / (4 * A**2)))
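

# Illustrative sanity check (not part of the original module): three points on
# the circle of radius 1 centered at (1, 0) should recover that center and radius.
def _check_circle_through_points():
  cx, cy, r = circle_through_points((0., 0., 0.), (1., 1., 0.), (2., 0., 0.))
  assert abs(cx - 1.) < 1e-9 and abs(cy) < 1e-9 and abs(r - 1.) < 1e-9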
|

def parse_speed_unit(max_speed):
  """Converts a maxspeed string to m/s based on the unit present in the input.
  OpenStreetMap defaults to kph if no unit is present."""

  if not max_speed:
    return None

  conversion = CV.KPH_TO_MS
  if 'mph' in max_speed:
    max_speed = max_speed.replace(' mph', '')
    conversion = CV.MPH_TO_MS
  try:
    return float(max_speed) * conversion
  except ValueError:
    return None
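
# Illustrative usage (values rounded; the CV constants come from selfdrive.config):
#   parse_speed_unit("50")     -> 50 * CV.KPH_TO_MS  ~= 13.9 m/s (kph is the OSM default)
#   parse_speed_unit("30 mph") -> 30 * CV.MPH_TO_MS  ~= 13.4 m/s
#   parse_speed_unit("walk")   -> None (not parseable as a number)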
|

def parse_speed_tags(tags):
  """Parses tags on a way to find the maxspeed string"""
  max_speed = None

  if 'maxspeed' in tags:
    max_speed = tags['maxspeed']

  if 'maxspeed:conditional' in tags:
    try:
      max_speed_cond, cond = tags['maxspeed:conditional'].split(' @ ')
      cond = cond[1:-1]

      start, end = cond.split('-')
      now = datetime.now()  # TODO: Get time and timezone from gps fix so this will work correctly on replays
      start = datetime.strptime(start, "%H:%M").replace(year=now.year, month=now.month, day=now.day)
      end = datetime.strptime(end, "%H:%M").replace(year=now.year, month=now.month, day=now.day)

      if start <= now <= end:
        max_speed = max_speed_cond
    except ValueError:
      pass

  if not max_speed and 'source:maxspeed' in tags:
    max_speed = DEFAULT_SPEEDS.get(tags['source:maxspeed'], None)
  if not max_speed and 'maxspeed:type' in tags:
    max_speed = DEFAULT_SPEEDS.get(tags['maxspeed:type'], None)

  max_speed = parse_speed_unit(max_speed)
  return max_speed
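
# Illustrative example (hypothetical way, common OSM tagging style):
#   parse_speed_tags({'maxspeed': '100', 'maxspeed:conditional': '30 @ (07:00-19:00)'})
# yields 30 kph converted to m/s between 07:00 and 19:00 local time, and
# 100 kph converted to m/s otherwise.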
|

def geocode_maxspeed(tags, location_info):
  max_speed = None
  try:
    geocode_country = location_info.get('country', '')
    geocode_region = location_info.get('region', '')

    country_rules = DEFAULT_SPEEDS_BY_REGION.get(geocode_country, {})
    country_defaults = country_rules.get('Default', [])
    for rule in country_defaults:
      rule_valid = all(
        tag_name in tags
        and tags[tag_name] == value
        for tag_name, value in rule['tags'].items()
      )
      if rule_valid:
        max_speed = rule['speed']
        break  # stop searching country

    region_rules = country_rules.get(geocode_region, [])
    for rule in region_rules:
      rule_valid = all(
        tag_name in tags
        and tags[tag_name] == value
        for tag_name, value in rule['tags'].items()
      )
      if rule_valid:
        max_speed = rule['speed']
        break  # stop searching region
  except KeyError:
    pass
  max_speed = parse_speed_unit(max_speed)
  return max_speed
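
# Illustrative rule shape (hypothetical entries mirroring the structure this
# function expects from default_speeds_by_region.json):
#   {"US": {"Default":    [{"tags": {"highway": "motorway"}, "speed": "65 mph"}],
#           "California": [{"tags": {"highway": "motorway"}, "speed": "70 mph"}]}}
# Region rules are evaluated after the country defaults, so a region match
# overrides the country-wide speed.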
|

class Way:
  def __init__(self, way, query_results):
    self.id = way.id
    self.way = way
    self.query_results = query_results

    points = list()

    for node in self.way.get_nodes(resolve_missing=False):
      points.append((float(node.lat), float(node.lon), 0.))

    self.points = np.asarray(points)

  @classmethod
  def closest(cls, query_results, lat, lon, heading, prev_way=None):
    results, tree, real_nodes, node_to_way, location_info = query_results

    cur_pos = geodetic2ecef((lat, lon, 0))
    nodes = tree.query_ball_point(cur_pos, 500)

    # If no nodes within 500m, choose closest one
    if not nodes:
      nodes = [tree.query(cur_pos)[1]]

    ways = []
    for n in nodes:
      real_node = real_nodes[n]
      ways += node_to_way[real_node.id]
    ways = set(ways)

    closest_way = None
    best_score = None
    for way in ways:
      way = Way(way, query_results)
      points = way.points_in_car_frame(lat, lon, heading)

      on_way = way.on_way(lat, lon, heading, points)
      if not on_way:
        continue

      # Create mask of points in front and behind
      x = points[:, 0]
      y = points[:, 1]
      angles = np.arctan2(y, x)
      front = np.logical_and((-np.pi / 2) < angles,
                             angles < (np.pi / 2))
      behind = np.logical_not(front)

      dists = np.linalg.norm(points, axis=1)

      # Get closest point behind the car
      dists_behind = np.copy(dists)
      dists_behind[front] = np.NaN
      closest_behind = points[np.nanargmin(dists_behind)]

      # Get closest point in front of the car
      dists_front = np.copy(dists)
      dists_front[behind] = np.NaN
      closest_front = points[np.nanargmin(dists_front)]

      # fit line: y = a*x + b
      x1, y1, _ = closest_behind
      x2, y2, _ = closest_front
      a = (y2 - y1) / max((x2 - x1), 1e-5)
      b = y1 - a * x1

      # With a factor of 60 a 20m offset causes the same error as a 20 degree heading error
      # (A 20 degree heading offset results in an a of about 1/3)
      score = abs(a) * 60. + abs(b)

      # Prefer same type of road
      if prev_way is not None:
        if way.way.tags.get('highway', '') == prev_way.way.tags.get('highway', ''):
          score *= 0.5

      if closest_way is None or score < best_score:
        closest_way = way
        best_score = score

    # Normal score is < 5
    if closest_way is None or best_score > 50:
      return None

    return closest_way
|
  def __str__(self):
    return "%s %s" % (self.id, self.way.tags)

  def max_speed(self):
    """Extracts the (conditional) speed limit from a way"""
    if not self.way:
      return None

    max_speed = parse_speed_tags(self.way.tags)
    if not max_speed:
      location_info = self.query_results[4]
      max_speed = geocode_maxspeed(self.way.tags, location_info)

    return max_speed
|
  def max_speed_ahead(self, current_speed_limit, lat, lon, heading, lookahead):
    """Look ahead for a max speed"""
    if not self.way:
      return None, None

    speed_ahead = None
    speed_ahead_dist = None
    lookahead_ways = 5
    way = self
    for i in range(lookahead_ways):
      way_pts = way.points_in_car_frame(lat, lon, heading)

      # Check current lookahead distance
      max_dist = np.linalg.norm(way_pts[-1, :])

      if max_dist > 2 * lookahead:
        break

      if 'maxspeed' in way.way.tags:
        spd = parse_speed_tags(way.way.tags)
        if not spd:
          location_info = self.query_results[4]
          spd = geocode_maxspeed(way.way.tags, location_info)
        if spd is not None and spd < current_speed_limit:
          speed_ahead = spd
          min_dist = np.linalg.norm(way_pts[1, :])
          speed_ahead_dist = min_dist
          break
      # Find next way
      way = way.next_way()
      if not way:
        break

    return speed_ahead, speed_ahead_dist
|
  def advisory_max_speed(self):
    if not self.way:
      return None

    tags = self.way.tags
    adv_speed = None

    if 'maxspeed:advisory' in tags:
      adv_speed = tags['maxspeed:advisory']
      adv_speed = parse_speed_unit(adv_speed)
    return adv_speed
|
  def on_way(self, lat, lon, heading, points=None):
    if points is None:
      points = self.points_in_car_frame(lat, lon, heading)
    x = points[:, 0]
    return np.min(x) < 0. and np.max(x) > 0.

  def closest_point(self, lat, lon, heading, points=None):
    if points is None:
      points = self.points_in_car_frame(lat, lon, heading)
    i = np.argmin(np.linalg.norm(points, axis=1))
    return points[i]

  def distance_to_closest_node(self, lat, lon, heading, points=None):
    if points is None:
      points = self.points_in_car_frame(lat, lon, heading)
    return np.min(np.linalg.norm(points, axis=1))
|
  def points_in_car_frame(self, lat, lon, heading):
    lc = LocalCoord.from_geodetic([lat, lon, 0.])

    # Build rotation matrix
    heading = math.radians(-heading + 90)
    c, s = np.cos(heading), np.sin(heading)
    rot = np.array([[c, s, 0.], [-s, c, 0.], [0., 0., 1.]])

    # Convert to local coordinates
    points_carframe = lc.geodetic2ned(self.points).T

    # Rotate with heading of car
    points_carframe = np.dot(rot, points_carframe[(1, 0, 2), :]).T

    return points_carframe
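
  # Illustrative geometry note (not from the original source): the NED rows are
  # reindexed to (east, north, up) before rotating, and with heading = 0 (due
  # north) the rotation above maps the north component onto +x, so the car
  # frame ends up with +x pointing straight ahead and +y to the left.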
|
  def next_way(self, backwards=False):
    results, tree, real_nodes, node_to_way, location_info = self.query_results

    if backwards:
      node = self.way.nodes[0]
    else:
      node = self.way.nodes[-1]

    ways = node_to_way[node.id]

    way = None
    try:
      # Simple heuristic to find next way
      ways = [w for w in ways if w.id != self.id]
      ways = [w for w in ways if w.nodes[0] == node]

      # Filter on highway tag
      acceptable_tags = list()
      cur_tag = self.way.tags['highway']
      acceptable_tags.append(cur_tag)
      if cur_tag == 'motorway_link':
        acceptable_tags.append('motorway')
        acceptable_tags.append('trunk')
        acceptable_tags.append('primary')
      ways = [w for w in ways if w.tags['highway'] in acceptable_tags]

      # Filter on number of lanes
      cur_num_lanes = int(self.way.tags['lanes'])
      if len(ways) > 1:
        ways_same_lanes = [w for w in ways if int(w.tags['lanes']) == cur_num_lanes]
        if len(ways_same_lanes) == 1:
          ways = ways_same_lanes
      if len(ways) > 1:
        ways = [w for w in ways if int(w.tags['lanes']) > cur_num_lanes]
      if len(ways) == 1:
        way = Way(ways[0], self.query_results)

    except (KeyError, ValueError):
      pass

    return way
|
  def get_lookahead(self, lat, lon, heading, lookahead):
    pnts = None
    way = self
    valid = False

    for i in range(5):
      # Get new points and append to list
      new_pnts = way.points_in_car_frame(lat, lon, heading)

      if pnts is None:
        pnts = new_pnts
      else:
        pnts = np.vstack([pnts, new_pnts])

      # Check current lookahead distance
      max_dist = np.linalg.norm(pnts[-1, :])
      if max_dist > lookahead:
        valid = True

      if max_dist > 2 * lookahead:
        break

      # Find next way
      way = way.next_way()
      if not way:
        break

    return pnts, valid
@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
version https://git-lfs.github.com/spec/v1
oid sha256:125944cbfd9300a663ec5c01c48a67b3bdb0d64d253423656c47f3dcc7886e68
oid sha256:9ba2abd00e366eafd598ef10054783de945fb21f22d4c2780e1e4c20d28cdc93
size 1171544
size 1171544

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
version https://git-lfs.github.com/spec/v1
oid sha256:43447776956dfc2b120ee525b489dc8a9090bc02975130ee6bcaa8557356de52
oid sha256:010fbcc81ddc121424d688568d98bbcbe1f5671ce8af21dba424b89d84e3b214
size 1159016
size 1159016
Some files were not shown because too many files have changed in this diff