parent 92aa7e7c96
commit fa4b6f33d1
101 changed files with 3665 additions and 2619 deletions
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5e6997ef9a2f37fb6783d0b41c6d85b8c275e916f0e66dcbd8b1050461892852
-size 2599
+oid sha256:a1fd3e30b499e9e5387495544631cb334b62521a5be6668b98d52e3ef5d6e448
+size 2308

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67e035ae5f7a07977f9839dcff6ff49189f5742e2e2e92977b2cfc0e041189df
-size 168833
+oid sha256:451e503913e7579c3241b5fec46c81f832735ecd9fc0ec6dffdf246852e146d1
+size 165792

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8b20fb584bc1cc3637e5cb58b6688e9e250bb7f9eacb8b6e0d50a890e79d7797
-size 2848396
+oid sha256:1e7c9d41c6950dbc0dc4f5ab6c2bf82a1795738df5904e144aa36e496bd6fb33
+size 2850290

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7a35f3ee4353210c0cfe714653920c05c588fb40c6c62088f81cd02bfc7eb6d3
-size 16150762
+oid sha256:74b02b4d5eaa3b087a3820249ef64294984c87be6b7f561d3d9b0bf746fcced1
+size 16726329
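The hunks above only touch Git LFS pointer files; the actual binaries live in LFS storage, keyed by the sha256 oid recorded in the pointer. A minimal sketch (not part of this commit, hypothetical helper name) of checking a fetched blob against such a pointer:

# Minimal sketch: verify a downloaded blob against a Git LFS pointer file
# like the ones shown above (fields: version, oid sha256:<hex>, size).
import hashlib

def verify_lfs_object(pointer_path, blob_path):
    fields = dict(line.split(" ", 1) for line in open(pointer_path).read().splitlines() if line)
    expected_oid = fields["oid"].strip().split(":", 1)[1]   # strip the "sha256:" prefix
    expected_size = int(fields["size"])

    data = open(blob_path, "rb").read()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid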
@@ -1,7 +1,7 @@
 {
-"ota_url": "https://commadist.azureedge.net/neosupdate/ota-signed-07df505453684371b6c22583ffbb74ee414fcd389a46ff369ffd1b6bac75414e.zip",
-"ota_hash": "07df505453684371b6c22583ffbb74ee414fcd389a46ff369ffd1b6bac75414e",
-"recovery_url": "https://commadist.azureedge.net/neosupdate/recovery-3a6f973295ded6e4ff5cfff3b12e19c80d3bf45e2e8dd8699da3fc25b23ed7c6.img",
-"recovery_len": 15848748,
-"recovery_hash": "3a6f973295ded6e4ff5cfff3b12e19c80d3bf45e2e8dd8699da3fc25b23ed7c6"
+"ota_url": "https://commadist.azureedge.net/neosupdate/ota-signed-efdf7de63b1aef63d68301e6175930991bf9a5927d16ec6fcc69287e2ee7ca4a.zip",
+"ota_hash": "efdf7de63b1aef63d68301e6175930991bf9a5927d16ec6fcc69287e2ee7ca4a",
+"recovery_url": "https://commadist.azureedge.net/neosupdate/recovery-97c27e6ed04ed6bb0608b845a2d4100912093f9380c3f2ba6b56bccd608e5f6e.img",
+"recovery_len": 15861036,
+"recovery_hash": "97c27e6ed04ed6bb0608b845a2d4100912093f9380c3f2ba6b56bccd608e5f6e"
 }
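The manifest above carries a sha256 for the OTA zip, plus a length and sha256 for the recovery image. A rough sketch of how a downloader could verify those fields before flashing (assumes the files are already downloaded locally; function and path names are hypothetical):

# Rough sketch: verify downloaded NEOS update payloads against the
# ota_hash / recovery_len / recovery_hash fields of the manifest above.
import hashlib, json

def check_neos_payload(manifest_path, ota_path, recovery_path):
    m = json.load(open(manifest_path))

    ota = open(ota_path, "rb").read()
    recovery = open(recovery_path, "rb").read()

    assert hashlib.sha256(ota).hexdigest() == m["ota_hash"]
    assert len(recovery) == m["recovery_len"]
    assert hashlib.sha256(recovery).hexdigest() == m["recovery_hash"]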
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:53948734c96a5aee45fcaed9fd6191056328ed3467dcd3d40d25f310d38c2297
size 15207664
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f6305c44138a387e1757ed30a9bb6487d35c8919d0ae258998acb0e74b584e1
+oid sha256:122b76f6b87759b82cfc57db222df7d852d8f8aca4abb56108b474b18304b936
 size 186615
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:23a8d66c0b1de9c29f1cd9a74beeba9a67fd6e27392a9971f9771cc387125061
size 5303349
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7e36d90b6ff6aaf655e57aed5c907eea55ad30806f0720c7e69559fa5e901592
size 26395446

Binary file not shown.
@@ -0,0 +1,7 @@
#!/usr/bin/env sh

# Stop updater
pkill -2 -f selfdrive.updated

# Remove pending update
rm -f /data/safe_staging/finalized/.overlay_consistent
@@ -0,0 +1,4 @@
#!/usr/bin/env sh

# Send SIGHUP to updater
pkill -1 -f selfdrive.updated
@@ -0,0 +1,192 @@
#!/usr/bin/env python3
import traceback
import struct
from tqdm import tqdm

from selfdrive.car.isotp_parallel_query import IsoTpParallelQuery
from selfdrive.swaglog import cloudlog
from selfdrive.car.fingerprints import FW_VERSIONS
import panda.python.uds as uds

from cereal import car
Ecu = car.CarParams.Ecu

def p16(val):
  return struct.pack("!H", val)

TESTER_PRESENT_REQUEST = bytes([uds.SERVICE_TYPE.TESTER_PRESENT, 0x0])
TESTER_PRESENT_RESPONSE = bytes([uds.SERVICE_TYPE.TESTER_PRESENT + 0x40, 0x0])

SHORT_TESTER_PRESENT_REQUEST = bytes([uds.SERVICE_TYPE.TESTER_PRESENT])
SHORT_TESTER_PRESENT_RESPONSE = bytes([uds.SERVICE_TYPE.TESTER_PRESENT + 0x40])

DEFAULT_DIAGNOSTIC_REQUEST = bytes([uds.SERVICE_TYPE.DIAGNOSTIC_SESSION_CONTROL,
                                    uds.SESSION_TYPE.DEFAULT])
DEFAULT_DIAGNOSTIC_RESPONSE = bytes([uds.SERVICE_TYPE.DIAGNOSTIC_SESSION_CONTROL + 0x40,
                                     uds.SESSION_TYPE.DEFAULT, 0x0, 0x32, 0x1, 0xf4])

EXTENDED_DIAGNOSTIC_REQUEST = bytes([uds.SERVICE_TYPE.DIAGNOSTIC_SESSION_CONTROL,
                                     uds.SESSION_TYPE.EXTENDED_DIAGNOSTIC])
EXTENDED_DIAGNOSTIC_RESPONSE = bytes([uds.SERVICE_TYPE.DIAGNOSTIC_SESSION_CONTROL + 0x40,
                                      uds.SESSION_TYPE.EXTENDED_DIAGNOSTIC, 0x0, 0x32, 0x1, 0xf4])

UDS_VERSION_REQUEST = bytes([uds.SERVICE_TYPE.READ_DATA_BY_IDENTIFIER]) + \
  p16(uds.DATA_IDENTIFIER_TYPE.APPLICATION_SOFTWARE_IDENTIFICATION)
UDS_VERSION_RESPONSE = bytes([uds.SERVICE_TYPE.READ_DATA_BY_IDENTIFIER + 0x40]) + \
  p16(uds.DATA_IDENTIFIER_TYPE.APPLICATION_SOFTWARE_IDENTIFICATION)

TOYOTA_VERSION_REQUEST = b'\x1a\x88\x01'
TOYOTA_VERSION_RESPONSE = b'\x5a\x88\x01'

OBD_VERSION_REQUEST = b'\x09\x04'
OBD_VERSION_RESPONSE = b'\x49\x04'


REQUESTS = [
  # Honda
  (
    [UDS_VERSION_REQUEST],
    [UDS_VERSION_RESPONSE]
  ),
  # Toyota
  (
    [SHORT_TESTER_PRESENT_REQUEST, TOYOTA_VERSION_REQUEST],
    [SHORT_TESTER_PRESENT_RESPONSE, TOYOTA_VERSION_RESPONSE]
  ),
  (
    [SHORT_TESTER_PRESENT_REQUEST, OBD_VERSION_REQUEST],
    [SHORT_TESTER_PRESENT_RESPONSE, OBD_VERSION_RESPONSE]
  ),
  (
    [TESTER_PRESENT_REQUEST, DEFAULT_DIAGNOSTIC_REQUEST, EXTENDED_DIAGNOSTIC_REQUEST, UDS_VERSION_REQUEST],
    [TESTER_PRESENT_RESPONSE, DEFAULT_DIAGNOSTIC_RESPONSE, EXTENDED_DIAGNOSTIC_RESPONSE, UDS_VERSION_RESPONSE]
  )
]

def chunks(l, n=128):
  for i in range(0, len(l), n):
    yield l[i:i + n]

def match_fw_to_car(fw_versions):
  candidates = FW_VERSIONS
  invalid = []

  for candidate, fws in candidates.items():
    for ecu, expected_versions in fws.items():
      ecu_type = ecu[0]
      addr = ecu[1:]

      found_version = fw_versions.get(addr, None)

      # Allow DSU not being present
      if ecu_type in [Ecu.unknown, Ecu.dsu] and found_version is None:
        continue

      if found_version not in expected_versions:
        invalid.append(candidate)
        break

  return set(candidates.keys()) - set(invalid)


def get_fw_versions(logcan, sendcan, bus, extra=None, timeout=0.1, debug=False, progress=False):
  ecu_types = {}

  # Extract ECU addresses to query from fingerprints
  # ECUs using a sub-address need to be queried one by one, the rest can be done in parallel
  addrs = []
  parallel_addrs = []

  versions = FW_VERSIONS
  if extra is not None:
    versions.update(extra)

  for c in versions.values():
    for ecu_type, addr, sub_addr in c.keys():
      a = (addr, sub_addr)
      if a not in ecu_types:
        ecu_types[a] = ecu_type

      if sub_addr is None:
        parallel_addrs.append(a)
      else:
        addrs.append([a])
  addrs.insert(0, parallel_addrs)

  fw_versions = {}
  for i, addr in enumerate(tqdm(addrs, disable=not progress)):
    for addr_chunk in chunks(addr):
      for request, response in REQUESTS:
        try:
          query = IsoTpParallelQuery(sendcan, logcan, bus, addr_chunk, request, response, debug=debug)
          t = 2 * timeout if i == 0 else timeout
          fw_versions.update(query.get_data(t))
        except Exception:
          cloudlog.warning(f"FW query exception: {traceback.format_exc()}")

  # Build capnp list to put into CarParams
  car_fw = []
  for addr, version in fw_versions.items():
    f = car.CarParams.CarFw.new_message()

    f.ecu = ecu_types[addr]
    f.fwVersion = version
    f.address = addr[0]

    if addr[1] is not None:
      f.subAddress = addr[1]

    car_fw.append(f)

  candidates = match_fw_to_car(fw_versions)
  return candidates, car_fw


if __name__ == "__main__":
  import time
  import argparse
  import cereal.messaging as messaging
  from selfdrive.car.vin import get_vin

  parser = argparse.ArgumentParser(description='Get firmware version of ECUs')
  parser.add_argument('--scan', action='store_true')
  parser.add_argument('--debug', action='store_true')
  args = parser.parse_args()

  logcan = messaging.sub_sock('can')
  sendcan = messaging.pub_sock('sendcan')

  extra = None
  if args.scan:
    extra = {"DEBUG": {}}
    # Honda
    for i in range(256):
      extra["DEBUG"][(Ecu.unknown, 0x18da00f1 + (i << 8), None)] = []
      extra["DEBUG"][(Ecu.unknown, 0x700 + i, None)] = []
      extra["DEBUG"][(Ecu.unknown, 0x750, i)] = []

  time.sleep(1.)

  t = time.time()
  print("Getting vin...")
  addr, vin = get_vin(logcan, sendcan, 1, retry=10, debug=args.debug)
  print(f"VIN: {vin}")
  print("Getting VIN took %.3f s" % (time.time() - t))
  print()

  t = time.time()
  candidates, fw_vers = get_fw_versions(logcan, sendcan, 1, extra=extra, debug=args.debug, progress=True)

  print()
  print("Found FW versions")
  print("{")
  for version in fw_vers:
    subaddr = None if version.subAddress == 0 else hex(version.subAddress)
    print(f"  (Ecu.{version.ecu}, {hex(version.address)}, {subaddr}): [{version.fwVersion}]")
  print("}")

  print()
  print("Possible matches:", candidates)
  print("Getting fw took %.3f s" % (time.time() - t))
@@ -0,0 +1,128 @@
import time
from collections import defaultdict
from functools import partial

import cereal.messaging as messaging
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from panda.python.uds import CanClient, IsoTpMessage, FUNCTIONAL_ADDRS, get_rx_addr_for_tx_addr


class IsoTpParallelQuery():
  def __init__(self, sendcan, logcan, bus, addrs, request, response, functional_addr=False, debug=False):
    self.sendcan = sendcan
    self.logcan = logcan
    self.bus = bus
    self.request = request
    self.response = response
    self.debug = debug
    self.functional_addr = functional_addr

    self.real_addrs = []
    for a in addrs:
      if isinstance(a, tuple):
        self.real_addrs.append(a)
      else:
        self.real_addrs.append((a, None))

    self.msg_addrs = {tx_addr: get_rx_addr_for_tx_addr(tx_addr[0]) for tx_addr in self.real_addrs}
    self.msg_buffer = defaultdict(list)

  def rx(self):
    """Drain can socket and sort messages into buffers based on address"""
    can_packets = messaging.drain_sock(self.logcan, wait_for_one=True)

    for packet in can_packets:
      for msg in packet.can:
        if msg.src == self.bus:
          if self.functional_addr:
            if (0x7E8 <= msg.address <= 0x7EF) or (0x18DAF100 <= msg.address <= 0x18DAF1FF):
              fn_addr = next(a for a in FUNCTIONAL_ADDRS if msg.address - a <= 32)
              self.msg_buffer[fn_addr].append((msg.address, msg.busTime, msg.dat, msg.src))
          elif msg.address in self.msg_addrs.values():
            self.msg_buffer[msg.address].append((msg.address, msg.busTime, msg.dat, msg.src))

  def _can_tx(self, tx_addr, dat, bus):
    """Helper function to send single message"""
    msg = [tx_addr, 0, dat, bus]
    self.sendcan.send(can_list_to_can_capnp([msg], msgtype='sendcan'))

  def _can_rx(self, addr, sub_addr=None):
    """Helper function to retrieve message with specified address and sub-address from buffer"""
    keep_msgs = []

    if sub_addr is None:
      msgs = self.msg_buffer[addr]
    else:
      # Filter based on sub-address
      msgs = []
      for m in self.msg_buffer[addr]:
        first_byte = m[2][0]
        if first_byte == sub_addr:
          msgs.append(m)
        else:
          keep_msgs.append(m)

    self.msg_buffer[addr] = keep_msgs
    return msgs

  def _drain_rx(self):
    messaging.drain_sock(self.logcan)
    self.msg_buffer = defaultdict(list)

  def get_data(self, timeout):
    self._drain_rx()

    # Create message objects
    msgs = {}
    request_counter = {}
    request_done = {}
    for tx_addr, rx_addr in self.msg_addrs.items():
      # rx_addr not set when using functional tx addr
      id_addr = rx_addr or tx_addr[0]
      sub_addr = tx_addr[1]

      can_client = CanClient(self._can_tx, partial(self._can_rx, id_addr, sub_addr=sub_addr), tx_addr[0], rx_addr, self.bus, sub_addr=sub_addr, debug=self.debug)

      max_len = 8 if sub_addr is None else 7

      msg = IsoTpMessage(can_client, timeout=0, max_len=max_len, debug=self.debug)
      msg.send(self.request[0])

      msgs[tx_addr] = msg
      request_counter[tx_addr] = 0
      request_done[tx_addr] = False

    results = {}
    start_time = time.time()
    while True:
      self.rx()

      if all(request_done.values()):
        break

      for tx_addr, msg in msgs.items():
        dat = msg.recv()

        if not dat:
          continue

        counter = request_counter[tx_addr]
        expected_response = self.response[counter]
        response_valid = dat[:len(expected_response)] == expected_response

        if response_valid:
          if counter + 1 < len(self.request):
            msg.send(self.request[counter + 1])
            request_counter[tx_addr] += 1
          else:
            results[tx_addr] = dat[len(expected_response):]
            request_done[tx_addr] = True
        else:
          request_done[tx_addr] = True
          cloudlog.warning(f"iso-tp query bad response: 0x{bytes.hex(dat)}")

      if time.time() - start_time > timeout:
        break

    return results
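A sketch of how a caller drives this class (mirroring the Toyota pairing in fw_versions.py above): each queried address walks its request/response lists in lockstep, only sending the next request once the previous response matches the expected prefix, and the payload after the matched prefix of the last response is returned. Addresses, bus number and sleep-free setup here are illustrative, not a recommended configuration:

# Illustrative use of IsoTpParallelQuery (socket setup as in the __main__
# blocks elsewhere in this diff).
import cereal.messaging as messaging
from selfdrive.car.isotp_parallel_query import IsoTpParallelQuery

logcan = messaging.sub_sock('can')
sendcan = messaging.pub_sock('sendcan')

request = [b'\x3e', b'\x1a\x88\x01']    # tester present, then Toyota version request
response = [b'\x7e', b'\x5a\x88\x01']   # expected positive-response prefixes

query = IsoTpParallelQuery(sendcan, logcan, 1, [0x750], request, response)
versions = query.get_data(0.1)          # e.g. {(0x750, None): b'<fw version bytes>'}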
@@ -1,104 +1,33 @@
 #!/usr/bin/env python3
+import traceback
 
 import cereal.messaging as messaging
-from selfdrive.boardd.boardd import can_list_to_can_capnp
+from panda.python.uds import FUNCTIONAL_ADDRS
+from selfdrive.car.isotp_parallel_query import IsoTpParallelQuery
+from selfdrive.swaglog import cloudlog
 
 VIN_REQUEST = b'\x09\x02'
 VIN_RESPONSE = b'\x49\x02\x01'
 VIN_UNKNOWN = "0" * 17
 
-# sanity checks on response messages from vin query
-def is_vin_response_valid(can_dat, step, cnt):
-  if len(can_dat) != 8:
-    # ISO-TP messages are all 8 bytes
-    return False
-
-  if step == 0:
-    # VIN does not fit in a single message and it's 20 bytes of data
-    if can_dat[0] != 0x10 or can_dat[1] != 0x14:
-      return False
-
-  if step == 1 and cnt == 0:
-    # first response after a CONTINUE query is sent
-    if can_dat[0] != 0x21:
-      return False
-
-  if step == 1 and cnt == 1:
-    # second response after a CONTINUE query is sent
-    if can_dat[0] != 0x22:
-      return False
-
-  return True
-
-
-class VinQuery():
-  def __init__(self, bus):
-    self.bus = bus
-    # works on standard 11-bit addresses for diagnostic. Tested on Toyota and Subaru;
-    # Honda uses the extended 29-bit addresses, and unfortunately only works from OBDII
-    self.query_ext_msgs = [[0x18DB33F1, 0, b'\x02\x09\x02'.ljust(8, b"\x00"), bus],
-                           [0x18DA10f1, 0, b'\x30'.ljust(8, b"\x00"), bus]]
-    self.query_nor_msgs = [[0x7df, 0, b'\x02\x09\x02'.ljust(8, b"\x00"), bus],
-                           [0x7e0, 0, b'\x30'.ljust(8, b"\x00"), bus]]
-
-    self.cnts = [1, 2]  # number of messages to wait for at each iteration
-    self.step = 0
-    self.cnt = 0
-    self.responded = False
-    self.never_responded = True
-    self.dat = b""
-    self.got_vin = False
-    self.vin = VIN_UNKNOWN
-
-  def check_response(self, msg):
-    # have we got a VIN query response?
-    if msg.src == self.bus and msg.address in [0x18daf110, 0x7e8]:
-      self.never_responded = False
-      # basic sanity checks on ISO-TP response
-      if is_vin_response_valid(msg.dat, self.step, self.cnt):
-        self.dat += bytes(msg.dat[2:]) if self.step == 0 else bytes(msg.dat[1:])
-        self.cnt += 1
-        if self.cnt == self.cnts[self.step]:
-          self.responded = True
-          self.step += 1
-          if self.step == len(self.cnts):
-            self.got_vin = True
-
-  def send_query(self, sendcan):
-    # keep sending VIN query if ECU isn't responding.
-    # sendcan is probably not ready due to the zmq slow joiner syndrome
-    if self.never_responded or (self.responded and not self.got_vin):
-      sendcan.send(can_list_to_can_capnp([self.query_ext_msgs[self.step]], msgtype='sendcan'))
-      sendcan.send(can_list_to_can_capnp([self.query_nor_msgs[self.step]], msgtype='sendcan'))
-      self.responded = False
-      self.cnt = 0
+def get_vin(logcan, sendcan, bus, timeout=0.1, retry=5, debug=False):
+  for i in range(retry):
+    try:
+      query = IsoTpParallelQuery(sendcan, logcan, bus, FUNCTIONAL_ADDRS, [VIN_REQUEST], [VIN_RESPONSE], functional_addr=True, debug=debug)
+      for addr, vin in query.get_data(timeout).items():
+        return addr[0], vin.decode()
+      print(f"vin query retry ({i+1}) ...")
+    except Exception:
+      cloudlog.warning(f"VIN query exception: {traceback.format_exc()}")
 
-  def get_vin(self):
-    if self.got_vin:
-      try:
-        self.vin = self.dat[3:].decode('utf8')
-      except UnicodeDecodeError:
-        pass  # have seen unexpected non-unicode characters
-    return self.vin
-
-
-def get_vin(logcan, sendcan, bus, query_time=1.):
-  vin_query = VinQuery(bus)
-  frame = 0
-
-  # 1s max of VIN query time
-  while frame < query_time * 100 and not vin_query.got_vin:
-    a = messaging.get_one_can(logcan)
-
-    for can in a.can:
-      vin_query.check_response(can)
-      if vin_query.got_vin:
-        break
-
-    vin_query.send_query(sendcan)
-    frame += 1
-
-  return vin_query.get_vin()
+  return 0, VIN_UNKNOWN
 
 
 if __name__ == "__main__":
-  logcan = messaging.sub_sock('can')
+  import time
   sendcan = messaging.pub_sock('sendcan')
-  print(get_vin(logcan, sendcan, 0))
+  logcan = messaging.sub_sock('can')
+  time.sleep(1)
+  addr, vin = get_vin(logcan, sendcan, 1, debug=False)
+  print(hex(addr), vin)
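The magic bytes checked by the removed is_vin_response_valid() come from ISO-TP multi-frame transfer: 0x10 marks a first frame, the following length byte 0x14 (20) covers the 3-byte 0x49 0x02 0x01 response header plus the 17-character VIN, and 0x21/0x22 are the first two consecutive frames. A small sketch of reassembling such a sequence (hand-written example frames, not captured data):

# Hand-written ISO-TP multi-frame VIN response (illustrative values only).
frames = [
    bytes([0x10, 0x14, 0x49, 0x02, 0x01]) + b"1FA",   # first frame: total payload length 0x14 = 20
    bytes([0x21]) + b"6P0LP3K",                        # consecutive frame 1 (7 payload bytes)
    bytes([0x22]) + b"W123456",                        # consecutive frame 2 (7 payload bytes)
]

payload = frames[0][2:] + b"".join(f[1:] for f in frames[1:])
total_len = frames[0][1]
vin = payload[:total_len][3:].decode()   # strip the 0x49 0x02 0x01 response header
print(vin)                               # 17-character VIN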
@@ -0,0 +1 @@
clocksd

@@ -0,0 +1,2 @@
Import('env', 'common', 'messaging')
env.Program('clocksd.cc', LIBS=['diag', 'time_genoff', common, messaging, 'capnp', 'zmq', 'kj'])
@@ -0,0 +1,72 @@
#include <stdio.h>
#include <stdint.h>
#include <sys/resource.h>
#include <sys/timerfd.h>
#include <sys/time.h>
#include <utils/Timers.h>
#include <capnp/serialize.h>
#include "messaging.hpp"
#include "common/timing.h"
#include "cereal/gen/cpp/log.capnp.h"

namespace {
  int64_t arm_cntpct() {
    int64_t v;
    asm volatile("mrs %0, cntpct_el0" : "=r"(v));
    return v;
  }
}

int main() {
  setpriority(PRIO_PROCESS, 0, -13);

  int err = 0;
  Context *context = Context::create();

  PubSocket* clock_publisher = PubSocket::create(context, "clocks");
  assert(clock_publisher != NULL);

  int timerfd = timerfd_create(CLOCK_BOOTTIME, 0);
  assert(timerfd >= 0);

  struct itimerspec spec = {0};
  spec.it_interval.tv_sec = 1;
  spec.it_interval.tv_nsec = 0;
  spec.it_value.tv_sec = 1;
  spec.it_value.tv_nsec = 0;

  err = timerfd_settime(timerfd, 0, &spec, 0);
  assert(err == 0);

  uint64_t expirations = 0;
  while ((err = read(timerfd, &expirations, sizeof(expirations)))) {
    if (err < 0) break;

    uint64_t boottime = nanos_since_boot();
    uint64_t monotonic = nanos_monotonic();
    uint64_t monotonic_raw = nanos_monotonic_raw();
    uint64_t wall_time = nanos_since_epoch();

    uint64_t modem_uptime_v = arm_cntpct() / 19200ULL;  // 19.2 MHz clock

    capnp::MallocMessageBuilder msg;
    cereal::Event::Builder event = msg.initRoot<cereal::Event>();
    event.setLogMonoTime(boottime);
    auto clocks = event.initClocks();

    clocks.setBootTimeNanos(boottime);
    clocks.setMonotonicNanos(monotonic);
    clocks.setMonotonicRawNanos(monotonic_raw);
    clocks.setWallTimeNanos(wall_time);
    clocks.setModemUptimeMillis(modem_uptime_v);

    auto words = capnp::messageToFlatArray(msg);
    auto bytes = words.asBytes();
    clock_publisher->send((char*)bytes.begin(), bytes.size());
  }

  close(timerfd);
  delete clock_publisher;

  return 0;
}
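A quick sanity check of the divide-by-19200 above, which feeds setModemUptimeMillis(): at a 19.2 MHz counter frequency, 19,200,000 ticks elapse per second, i.e. 19,200 per millisecond.

# 19.2 MHz counter -> milliseconds of uptime
freq_hz = 19_200_000
ticks = 5 * freq_hz            # counter value after 5 seconds of uptime
uptime_ms = ticks // 19_200
assert uptime_ms == 5_000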
@@ -1 +1 @@
-#define COMMA_VERSION "0.7-release"
+#define COMMA_VERSION "0.7.1-release"
@@ -1,6 +1,12 @@
-Import('env', 'messaging', 'common', 'visionipc')
-env.Program(['loggerd.cc', 'logger.c', 'raw_logger.cc', 'encoder.c'], LIBS=[
-  'zmq', 'czmq', 'capnp', 'kj', 'yaml-cpp', 'z',
+Import('env', 'arch', 'messaging', 'common', 'visionipc')
+
+src = ['loggerd.cc', 'logger.c']
+libs = ['zmq', 'czmq', 'capnp', 'kj', 'yaml-cpp', 'z',
   'avformat', 'avcodec', 'swscale', 'avutil',
-  'OmxVenc', 'OmxCore', 'yuv',
-  'bz2', 'cutils', common, 'json', messaging, visionipc])
+  'yuv', 'bz2', common, 'json', messaging, visionipc]
+
+if arch == "aarch64":
+  src += ['encoder.c', 'raw_logger.cc']
+  libs += ['OmxVenc', 'OmxCore', 'cutils']
+
+env.Program(src, LIBS=libs)
@@ -1,25 +1,36 @@
 Import('env', 'arch', 'messaging', 'common', 'gpucommon', 'visionipc')
+lenv = env.Clone()
 
 libs = [messaging, common, 'OpenCL', 'SNPE', 'capnp', 'zmq', 'kj', 'yuv', gpucommon, visionipc]
 
+common_src = [
+  "models/commonmodel.c",
+  "runners/snpemodel.cc",
+  "transforms/loadyuv.c",
+  "transforms/transform.c"]
+
 if arch == "aarch64":
   libs += ['gsl', 'CB', 'gnustl_shared']
 else:
   libs += ['symphony-cpu', 'pthread']
 
-common = env.Object([
-  "models/commonmodel.c",
-  "runners/snpemodel.cc",
-  "transforms/loadyuv.c",
-  "transforms/transform.c"])
+if FindFile('libtensorflow.so', env['LIBPATH']):
+  # for tensorflow support
+  common_src += ['runners/tfmodel.cc']
+  libs += ['tensorflow']
+  # tell runners to use it
+  lenv['CFLAGS'].append("-DUSE_TF_MODEL")
+  lenv['CXXFLAGS'].append("-DUSE_TF_MODEL")
+
+common = lenv.Object(common_src)
 
-env.Program('_monitoringd', [
+lenv.Program('_monitoringd', [
   "monitoringd.cc",
   "models/monitoring.cc",
 ]+common, LIBS=libs)
 
-env.Program('_modeld', [
+lenv.Program('_modeld', [
   "modeld.cc",
   "models/driving.cc",
-  "models/posenet.cc",
 ]+common, LIBS=libs)
||||
#!/bin/sh |
||||
export LD_LIBRARY_PATH="/data/pythonpath/phonelibs/snpe/aarch64-android-clang3.8/:$LD_LIBRARY_PATH" |
||||
export LD_LIBRARY_PATH="/data/pythonpath/phonelibs/snpe/aarch64-android-clang3.8/:/home/batman/one/phonelibs/snpe/x86_64-linux-clang:$LD_LIBRARY_PATH" |
||||
exec ./_modeld |
||||
|
||||
|
@ -1,58 +0,0 @@ |
||||
#include <string.h> |
||||
#include <math.h> |
||||
#include "posenet.h" |
||||
|
||||
void posenet_init(PosenetState *s) { |
||||
s->input = (float*)malloc(2*200*532*sizeof(float)); |
||||
s->m = new DefaultRunModel("../../models/posenet.dlc", s->output, sizeof(s->output)/sizeof(float), USE_GPU_RUNTIME); |
||||
} |
||||
|
||||
void posenet_push(PosenetState *s, uint8_t *yuv_ptr_y, int yuv_width) { |
||||
// move second frame to first frame
|
||||
memmove(&s->input[0], &s->input[1], sizeof(float)*(200*532*2 - 1)); |
||||
|
||||
// fill posenet input
|
||||
float a; |
||||
// posenet uses a half resolution cropped frame
|
||||
// with upper left corner: [50, 237] and
|
||||
// bottom right corner: [1114, 637]
|
||||
// So the resulting crop is 532 X 200
|
||||
for (int y=237; y<637; y+=2) { |
||||
int yy = (y-237)/2; |
||||
for (int x = 50; x < 1114; x+=2) { |
||||
int xx = (x-50)/2; |
||||
a = 0; |
||||
a += yuv_ptr_y[yuv_width*(y+0) + (x+1)]; |
||||
a += yuv_ptr_y[yuv_width*(y+1) + (x+1)]; |
||||
a += yuv_ptr_y[yuv_width*(y+0) + (x+0)]; |
||||
a += yuv_ptr_y[yuv_width*(y+1) + (x+0)]; |
||||
// The posenet takes a normalized image input
|
||||
// like the driving model so [0,255] is remapped
|
||||
// to [-1,1]
|
||||
s->input[(yy*532+xx)*2 + 1] = (a/512.0 - 1.0); |
||||
} |
||||
} |
||||
} |
||||
|
||||
void posenet_eval(PosenetState *s) { |
||||
s->m->execute(s->input); |
||||
|
||||
// fix stddevs
|
||||
for (int i = 6; i < 12; i++) { |
||||
s->output[i] = log1p(exp(s->output[i])) + 1e-6; |
||||
} |
||||
// to radians
|
||||
for (int i = 3; i < 6; i++) { |
||||
s->output[i] = M_PI * s->output[i] / 180.0; |
||||
} |
||||
// to radians
|
||||
for (int i = 9; i < 12; i++) { |
||||
s->output[i] = M_PI * s->output[i] / 180.0; |
||||
} |
||||
} |
||||
|
||||
void posenet_free(PosenetState *s) { |
||||
delete s->m; |
||||
free(s->input); |
||||
} |
||||
|
@@ -1,27 +0,0 @@
#ifndef POSENET_H
#define POSENET_H

#include <stdint.h>
#include "runners/run.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef struct PosenetState {
  float output[12];
  float *input;
  RunModel *m;
} PosenetState;

void posenet_init(PosenetState *s);
void posenet_push(PosenetState *s, uint8_t *yuv_ptr_y, int yuv_width);
void posenet_eval(PosenetState *s);
void posenet_free(PosenetState *s);

#ifdef __cplusplus
}
#endif

#endif
@@ -1,5 +1,5 @@
 #!/bin/sh
-export LD_LIBRARY_PATH="/data/pythonpath/phonelibs/snpe/aarch64-android-clang3.8:$LD_LIBRARY_PATH"
+export LD_LIBRARY_PATH="/data/pythonpath/phonelibs/snpe/aarch64-android-clang3.8:/home/batman/one/phonelibs/snpe/x86_64-linux-clang:$LD_LIBRARY_PATH"
 export ADSP_LIBRARY_PATH="/data/pythonpath/phonelibs/snpe/aarch64-android-clang3.8/"
 exec ./_monitoringd
@@ -1 +1 @@
-89304bdcab73fa43a8dd39cab93bc4ea4c9cbbdb
+b60841eb6cf09037200bc2daacf0c9cf69b358fe
@@ -1,5 +1,15 @@
-Import('env', 'common', 'messaging', 'gpucommon', 'visionipc', 'cereal')
+Import('env', 'arch', 'common', 'messaging', 'gpucommon', 'visionipc', 'cereal')
 
-env.Program('_ui', ['ui.cc', 'slplay.c', '#phonelibs/nanovg/nanovg.c'],
+src = ['ui.cc', 'paint.cc', '#phonelibs/nanovg/nanovg.c']
+libs = [common, 'zmq', 'czmq', 'capnp', 'capnp_c', 'm', cereal, 'json', messaging, 'OpenCL', gpucommon, visionipc]
+
+if arch == "aarch64":
+  src += ['sound.cc', 'slplay.c']
+  libs += ['EGL', 'GLESv3', 'gnustl_shared', 'log', 'utils', 'gui', 'hardware', 'ui', 'CB', 'gsl', 'adreno_utils', 'OpenSLES', 'cutils', 'uuid']
+else:
+  src += ['linux.cc']
+  libs += ['EGL', 'pthread', 'X11-xcb', 'xcb', 'X11', 'glfw']
+
+env.Program('_ui', src,
   LINKFLAGS=['-Wl,-rpath=/system/lib64,-rpath=/system/comma/usr/lib'],
-  LIBS=[common, 'zmq', 'czmq', 'capnp', 'capnp_c', 'm', 'GLESv3', 'EGL', cereal, 'gnustl_shared', 'log', 'utils', 'gui', 'hardware', 'ui', 'json', messaging, 'CB', 'OpenCL', 'gsl', 'adreno_utils', 'OpenSLES', 'cutils', 'uuid', gpucommon, visionipc])
+  LIBS=libs)
@@ -0,0 +1,100 @@
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>

#include "ui.hpp"

#define GLFW_INCLUDE_ES2
#define GLFW_INCLUDE_GLEXT
#include <GLFW/glfw3.h>

typedef struct FramebufferState FramebufferState;
typedef struct TouchState TouchState;

#define FALSE 0
#define TRUE 1

#include <xcb/xcb.h>
#include <X11/Xlib-xcb.h>

extern "C" {

FramebufferState* framebuffer_init(
    const char* name, int32_t layer, int alpha,
    int *out_w, int *out_h) {
  glfwInit();

  glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
  glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
  glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
  glfwWindowHint(GLFW_RESIZABLE, 0);
  GLFWwindow* window;
  window = glfwCreateWindow(1920, 1080, "ui", NULL, NULL);
  if (!window) {
    printf("glfwCreateWindow failed\n");
  }

  glfwMakeContextCurrent(window);
  glfwSwapInterval(0);

  // clear screen
  glClearColor(0.2f, 0.2f, 0.2f, 1.0f);
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
  framebuffer_swap((FramebufferState*)window);

  if (out_w) *out_w = 1920;
  if (out_h) *out_h = 1080;

  return (FramebufferState*)window;
}

void framebuffer_set_power(FramebufferState *s, int mode) {
}

void framebuffer_swap(FramebufferState *s) {
  glfwSwapBuffers((GLFWwindow*)s);
}

void touch_init(TouchState *s) {
  printf("touch_init\n");
}

int touch_poll(TouchState *s, int* out_x, int* out_y, int timeout) {
  return -1;
}

int touch_read(TouchState *s, int* out_x, int* out_y) {
  return -1;
}

}

#include "sound.hpp"

void ui_sound_init() {}
void ui_sound_destroy() {}

void set_volume(int volume) {}

void play_alert_sound(AudibleAlert alert) {}
void stop_alert_sound(AudibleAlert alert) {}

#include "common/visionimg.h"
#include <sys/mman.h>

GLuint visionimg_to_gl(const VisionImg *img, EGLImageKHR *pkhr, void **pph) {
  unsigned int texture;
  glGenTextures(1, &texture);
  glBindTexture(GL_TEXTURE_2D, texture);
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->width, img->height, 0, GL_RGB, GL_UNSIGNED_BYTE, *pph);
  glGenerateMipmap(GL_TEXTURE_2D);
  *pkhr = (EGLImageKHR *)1; // not NULL
  return texture;
}

void visionimg_destroy_gl(EGLImageKHR khr, void *ph) {
  // empty
}
File diff suppressed because it is too large
@@ -0,0 +1,85 @@
#include <stdlib.h>
#include "sound.hpp"

#include "common/swaglog.h"

typedef struct {
  AudibleAlert alert;
  const char* uri;
  bool loop;
} sound_file;

extern "C"{
#include "slplay.h"
}

void set_volume(int volume) {
  char volume_change_cmd[64];
  sprintf(volume_change_cmd, "service call audio 3 i32 3 i32 %d i32 1 &", volume);

  // 5 second timeout at 60fps
  int volume_changed = system(volume_change_cmd);
}


sound_file sound_table[] = {
  { cereal_CarControl_HUDControl_AudibleAlert_chimeDisengage, "../assets/sounds/disengaged.wav", false },
  { cereal_CarControl_HUDControl_AudibleAlert_chimeEngage, "../assets/sounds/engaged.wav", false },
  { cereal_CarControl_HUDControl_AudibleAlert_chimeWarning1, "../assets/sounds/warning_1.wav", false },
  { cereal_CarControl_HUDControl_AudibleAlert_chimeWarning2, "../assets/sounds/warning_2.wav", false },
  { cereal_CarControl_HUDControl_AudibleAlert_chimeWarningRepeat, "../assets/sounds/warning_repeat.wav", true },
  { cereal_CarControl_HUDControl_AudibleAlert_chimeError, "../assets/sounds/error.wav", false },
  { cereal_CarControl_HUDControl_AudibleAlert_chimePrompt, "../assets/sounds/error.wav", false },
  { cereal_CarControl_HUDControl_AudibleAlert_none, NULL, false },
};

sound_file* get_sound_file(AudibleAlert alert) {
  for (sound_file *s = sound_table; s->alert != cereal_CarControl_HUDControl_AudibleAlert_none; s++) {
    if (s->alert == alert) {
      return s;
    }
  }

  return NULL;
}

void play_alert_sound(AudibleAlert alert) {
  sound_file* sound = get_sound_file(alert);
  char* error = NULL;

  slplay_play(sound->uri, sound->loop, &error);
  if (error) {
    LOGW("error playing sound: %s", error);
  }
}

void stop_alert_sound(AudibleAlert alert) {
  sound_file* sound = get_sound_file(alert);
  char* error = NULL;

  slplay_stop_uri(sound->uri, &error);
  if (error) {
    LOGW("error stopping sound: %s", error);
  }
}

void ui_sound_init() {
  char *error = NULL;
  slplay_setup(&error);
  if (error) goto fail;

  for (sound_file *s = sound_table; s->alert != cereal_CarControl_HUDControl_AudibleAlert_none; s++) {
    slplay_create_player_for_uri(s->uri, &error);
    if (error) goto fail;
  }
  return;

fail:
  LOGW(error);
  exit(1);
}

void ui_sound_destroy() {
  slplay_destroy();
}
@@ -0,0 +1,17 @@
#ifndef __SOUND_HPP
#define __SOUND_HPP

#include "cereal/gen/c/log.capnp.h"

typedef enum cereal_CarControl_HUDControl_AudibleAlert AudibleAlert;

void ui_sound_init();
void ui_sound_destroy();

void set_volume(int volume);

void play_alert_sound(AudibleAlert alert);
void stop_alert_sound(AudibleAlert alert);

#endif
File diff suppressed because it is too large
@@ -0,0 +1,253 @@
#ifndef _UI_H
#define _UI_H

#include <GLES3/gl3.h>
#include <EGL/egl.h>

#include "nanovg.h"

#include "common/mat.h"
#include "common/visionipc.h"
#include "common/framebuffer.h"
#include "common/modeldata.h"
#include "messaging.hpp"

#include "cereal/gen/c/log.capnp.h"

#include "sound.hpp"

#define STATUS_STOPPED 0
#define STATUS_DISENGAGED 1
#define STATUS_ENGAGED 2
#define STATUS_WARNING 3
#define STATUS_ALERT 4

#define ALERTSIZE_NONE 0
#define ALERTSIZE_SMALL 1
#define ALERTSIZE_MID 2
#define ALERTSIZE_FULL 3

#ifndef QCOM
#define UI_60FPS
#endif

#define UI_BUF_COUNT 4
//#define SHOW_SPEEDLIMIT 1
//#define DEBUG_TURN

const int vwp_w = 1920;
const int vwp_h = 1080;
const int nav_w = 640;
const int nav_ww= 760;
const int sbr_w = 300;
const int bdr_s = 30;
const int box_x = sbr_w+bdr_s;
const int box_y = bdr_s;
const int box_w = vwp_w-sbr_w-(bdr_s*2);
const int box_h = vwp_h-(bdr_s*2);
const int viz_w = vwp_w-(bdr_s*2);
const int header_h = 420;
const int footer_h = 280;
const int footer_y = vwp_h-bdr_s-footer_h;

const int UI_FREQ = 30;   // Hz

const int MODEL_PATH_MAX_VERTICES_CNT = 98;
const int MODEL_LANE_PATH_CNT = 3;
const int TRACK_POINTS_MAX_CNT = 50 * 2;

const int SET_SPEED_NA = 255;

const uint8_t bg_colors[][4] = {
  [STATUS_STOPPED] = {0x07, 0x23, 0x39, 0xff},
  [STATUS_DISENGAGED] = {0x17, 0x33, 0x49, 0xff},
  [STATUS_ENGAGED] = {0x17, 0x86, 0x44, 0xff},
  [STATUS_WARNING] = {0xDA, 0x6F, 0x25, 0xff},
  [STATUS_ALERT] = {0xC9, 0x22, 0x31, 0xff},
};


typedef struct UIScene {
  int frontview;
  int fullview;

  int transformed_width, transformed_height;

  ModelData model;

  float mpc_x[50];
  float mpc_y[50];

  bool world_objects_visible;
  mat4 extrinsic_matrix;   // Last row is 0 so we can use mat4.

  float v_cruise;
  uint64_t v_cruise_update_ts;
  float v_ego;
  bool decel_for_model;

  float speedlimit;
  bool speedlimit_valid;
  bool map_valid;

  float curvature;
  int engaged;
  bool engageable;
  bool monitoring_active;

  bool uilayout_sidebarcollapsed;
  bool uilayout_mapenabled;
  // responsive layout
  int ui_viz_rx;
  int ui_viz_rw;
  int ui_viz_ro;

  int lead_status;
  float lead_d_rel, lead_y_rel, lead_v_rel;

  int front_box_x, front_box_y, front_box_width, front_box_height;

  uint64_t alert_ts;
  char alert_text1[1024];
  char alert_text2[1024];
  uint8_t alert_size;
  float alert_blinkingrate;

  float awareness_status;

  // Used to show gps planner status
  bool gps_planner_active;
} UIScene;

typedef struct {
  float x, y;
} vertex_data;

typedef struct {
  vertex_data v[MODEL_PATH_MAX_VERTICES_CNT];
  int cnt;
} model_path_vertices_data;

typedef struct {
  vertex_data v[TRACK_POINTS_MAX_CNT];
  int cnt;
} track_vertices_data;


typedef struct UIState {
  pthread_mutex_t lock;
  pthread_cond_t bg_cond;

  // framebuffer
  FramebufferState *fb;
  int fb_w, fb_h;
  EGLDisplay display;
  EGLSurface surface;

  // NVG
  NVGcontext *vg;

  // fonts and images
  int font_courbd;
  int font_sans_regular;
  int font_sans_semibold;
  int font_sans_bold;
  int img_wheel;
  int img_turn;
  int img_face;
  int img_map;

  // sockets
  Context *ctx;
  SubSocket *model_sock;
  SubSocket *controlsstate_sock;
  SubSocket *livecalibration_sock;
  SubSocket *radarstate_sock;
  SubSocket *map_data_sock;
  SubSocket *uilayout_sock;
  Poller * poller;

  int active_app;

  // vision state
  bool vision_connected;
  bool vision_connect_firstrun;
  int ipc_fd;

  VIPCBuf bufs[UI_BUF_COUNT];
  VIPCBuf front_bufs[UI_BUF_COUNT];
  int cur_vision_idx;
  int cur_vision_front_idx;

  GLuint frame_program;
  GLuint frame_texs[UI_BUF_COUNT];
  EGLImageKHR khr[UI_BUF_COUNT];
  void *priv_hnds[UI_BUF_COUNT];
  GLuint frame_front_texs[UI_BUF_COUNT];
  EGLImageKHR khr_front[UI_BUF_COUNT];
  void *priv_hnds_front[UI_BUF_COUNT];

  GLint frame_pos_loc, frame_texcoord_loc;
  GLint frame_texture_loc, frame_transform_loc;

  GLuint line_program;
  GLint line_pos_loc, line_color_loc;
  GLint line_transform_loc;

  int rgb_width, rgb_height, rgb_stride;
  size_t rgb_buf_len;
  mat4 rgb_transform;

  int rgb_front_width, rgb_front_height, rgb_front_stride;
  size_t rgb_front_buf_len;

  UIScene scene;
  bool awake;

  // timeouts
  int awake_timeout;
  int volume_timeout;
  int controls_timeout;
  int alert_sound_timeout;
  int speed_lim_off_timeout;
  int is_metric_timeout;
  int longitudinal_control_timeout;
  int limit_set_speed_timeout;

  bool controls_seen;

  int status;
  bool is_metric;
  bool longitudinal_control;
  bool limit_set_speed;
  float speed_lim_off;
  bool is_ego_over_limit;
  char alert_type[64];
  AudibleAlert alert_sound;
  int alert_size;
  float alert_blinking_alpha;
  bool alert_blinked;

  float light_sensor;

  int touch_fd;

  // Hints for re-calculations and redrawing
  bool model_changed;
  bool livempc_or_radarstate_changed;

  GLuint frame_vao[2], frame_vbo[2], frame_ibo[2];
  mat4 rear_frame_mat, front_frame_mat;

  model_path_vertices_data model_path_vertices[MODEL_LANE_PATH_CNT * 2];

  track_vertices_data track_vertices[2];
} UIState;

// API
void ui_draw_vision_alert(UIState *s, int va_size, int va_color,
                          const char* va_text1, const char* va_text2);
void ui_draw(UIState *s);
void ui_nvg_init(UIState *s);

#endif
@@ -1,57 +1,361 @@
 #!/usr/bin/env python3
 
-# simple service that waits for network access and tries to update every hour
+# Safe Update: A simple service that waits for network access and tries to
+# update every 10 minutes. It's intended to make the OP update process more
+# robust against Git repository corruption. This service DOES NOT try to fix
+# an already-corrupt BASEDIR Git repo, only prevent it from happening.
+#
+# During normal operation, both onroad and offroad, the update process makes
+# no changes to the BASEDIR install of OP. All update attempts are performed
+# in a disposable staging area provided by OverlayFS. It assumes the deleter
+# process provides enough disk space to carry out the process.
+#
+# If an update succeeds, a flag is set, and the update is swapped in at the
+# next reboot. If an update is interrupted or otherwise fails, the OverlayFS
+# upper layer and metadata can be discarded before trying again.
+#
+# The swap on boot is triggered by launch_chffrplus.sh
+# gated on the existence of $FINALIZED/.overlay_consistent and also the
+# existence and mtime of $BASEDIR/.overlay_init.
+#
+# Other than build byproducts, BASEDIR should not be modified while this
+# service is running. Developers modifying code directly in BASEDIR should
+# disable this service.
 
+import os
 import datetime
 import subprocess
 import time
+import psutil
+from stat import S_ISREG, S_ISDIR, S_ISLNK, S_IMODE, ST_MODE, ST_INO, ST_UID, ST_GID, ST_ATIME, ST_MTIME
+import shutil
+import signal
+from pathlib import Path
+import fcntl
+import threading
+from cffi import FFI
 
+from common.basedir import BASEDIR
 from common.params import Params
 from selfdrive.swaglog import cloudlog
 
+STAGING_ROOT = "/data/safe_staging"
+
+OVERLAY_UPPER = os.path.join(STAGING_ROOT, "upper")
+OVERLAY_METADATA = os.path.join(STAGING_ROOT, "metadata")
+OVERLAY_MERGED = os.path.join(STAGING_ROOT, "merged")
+FINALIZED = os.path.join(STAGING_ROOT, "finalized")
+
 NICE_LOW_PRIORITY = ["nice", "-n", "19"]
+SHORT = os.getenv("SHORT") is not None
+
+# Workaround for the EON/termux build of Python having os.link removed.
+ffi = FFI()
+ffi.cdef("int link(const char *oldpath, const char *newpath);")
+libc = ffi.dlopen(None)
+
+
+class WaitTimeHelper:
+  ready_event = threading.Event()
+  shutdown = False
+
+  def __init__(self):
+    signal.signal(signal.SIGTERM, self.graceful_shutdown)
+    signal.signal(signal.SIGINT, self.graceful_shutdown)
+    signal.signal(signal.SIGHUP, self.update_now)
+
+  def graceful_shutdown(self, signum, frame):
+    # umount -f doesn't appear effective in avoiding "device busy" on EON,
+    # so don't actually die until the next convenient opportunity in main().
+    cloudlog.info("caught SIGINT/SIGTERM, dismounting overlay at next opportunity")
+    self.shutdown = True
+    self.ready_event.set()
+
+  def update_now(self, signum, frame):
+    cloudlog.info("caught SIGHUP, running update check immediately")
+    self.ready_event.set()
+
+
+def wait_between_updates(ready_event):
+  ready_event.clear()
+  if SHORT:
+    ready_event.wait(timeout=10)
+  else:
+    ready_event.wait(timeout=60 * 10)
+
+
+def link(src, dest):
+  # Workaround for the EON/termux build of Python having os.link removed.
+  return libc.link(src.encode(), dest.encode())
+
+
+def run(cmd, cwd=None):
+  return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT, encoding='utf8')
+
+
+def remove_consistent_flag():
+  os.system("sync")
+  consistent_file = Path(os.path.join(FINALIZED, ".overlay_consistent"))
+  try:
+    consistent_file.unlink()
+  except FileNotFoundError:
+    pass
+  os.system("sync")
+
+
+def set_consistent_flag():
+  consistent_file = Path(os.path.join(FINALIZED, ".overlay_consistent"))
+  os.system("sync")
+  consistent_file.touch()
+  os.system("sync")
+
+
+def set_update_available_params(new_version=False):
+  params = Params()
+
+  t = datetime.datetime.now().isoformat()
+  params.put("LastUpdateTime", t.encode('utf8'))
+
+  if new_version:
+    try:
+      with open(os.path.join(FINALIZED, "RELEASES.md"), "rb") as f:
+        r = f.read()
+      r = r[:r.find(b'\n\n')]  # Slice latest release notes
+      params.put("ReleaseNotes", r + b"\n")
+    except Exception:
+      params.put("ReleaseNotes", "")
+    params.put("UpdateAvailable", "1")
+
+
+def dismount_ovfs():
+  if os.path.ismount(OVERLAY_MERGED):
+    cloudlog.error("unmounting existing overlay")
+    run(["umount", "-l", OVERLAY_MERGED])
+
+
+def init_ovfs():
+  cloudlog.info("preparing new safe staging area")
+  Params().put("UpdateAvailable", "0")
+
+  remove_consistent_flag()
+
+  dismount_ovfs()
+  if os.path.isdir(STAGING_ROOT):
+    shutil.rmtree(STAGING_ROOT)
+
+  for dirname in [STAGING_ROOT, OVERLAY_UPPER, OVERLAY_METADATA, OVERLAY_MERGED, FINALIZED]:
+    os.mkdir(dirname, 0o755)
+  if not os.lstat(BASEDIR).st_dev == os.lstat(OVERLAY_MERGED).st_dev:
+    raise RuntimeError("base and overlay merge directories are on different filesystems; not valid for overlay FS!")
+
+  # Remove consistent flag from current BASEDIR so it's not copied over
+  if os.path.isfile(os.path.join(BASEDIR, ".overlay_consistent")):
+    os.remove(os.path.join(BASEDIR, ".overlay_consistent"))
+
+  # We sync FS object atimes (which EON doesn't use) and mtimes, but ctimes
+  # are outside user control. Make sure Git is set up to ignore system ctimes,
+  # because they change when we make hard links during finalize. Otherwise,
+  # there is a lot of unnecessary churn. This appears to be a common need on
+  # OSX as well: https://www.git-tower.com/blog/make-git-rebase-safe-on-osx/
+  run(["git", "config", "core.trustctime", "false"], BASEDIR)
+
+  # We are temporarily using copytree to copy the directory, which also changes
+  # inode numbers. Ignore those changes too.
+  run(["git", "config", "core.checkStat", "minimal"], BASEDIR)
+
+  # Leave a timestamped canary in BASEDIR to check at startup. The EON clock
+  # should be correct by the time we get here. If the init file disappears, or
+  # critical mtimes in BASEDIR are newer than .overlay_init, continue.sh can
+  # assume that BASEDIR has been used for local development or otherwise
+  # modified, and skips the update activation attempt.
+  Path(os.path.join(BASEDIR, ".overlay_init")).touch()
+
+  overlay_opts = f"lowerdir={BASEDIR},upperdir={OVERLAY_UPPER},workdir={OVERLAY_METADATA}"
+  run(["mount", "-t", "overlay", "-o", overlay_opts, "none", OVERLAY_MERGED])
+
+
+def inodes_in_tree(search_dir):
+  """Given a search root, produce a dictionary mapping of inodes to relative
+  pathnames of regular files (no directories, symlinks, or special files)."""
+  inode_map = {}
+  for root, dirs, files in os.walk(search_dir, topdown=True):
+    for file_name in files:
+      full_path_name = os.path.join(root, file_name)
+      st = os.lstat(full_path_name)
+      if S_ISREG(st[ST_MODE]):
+        inode_map[st[ST_INO]] = full_path_name
+  return inode_map
+
+
+def dup_ovfs_object(inode_map, source_obj, target_dir):
+  """Given a relative pathname to copy, and a new target root, duplicate the
+  source object in the target root, using hardlinks for regular files."""
+
+  source_full_path = os.path.join(OVERLAY_MERGED, source_obj)
+  st = os.lstat(source_full_path)
+  target_full_path = os.path.join(target_dir, source_obj)
+
+  if S_ISREG(st[ST_MODE]):
+    # Hardlink all regular files; ownership and permissions are shared.
+    link(inode_map[st[ST_INO]], target_full_path)
+  else:
+    # Recreate all directories and symlinks; copy ownership and permissions.
+    if S_ISDIR(st[ST_MODE]):
+      os.mkdir(os.path.join(FINALIZED, source_obj), S_IMODE(st[ST_MODE]))
+    elif S_ISLNK(st[ST_MODE]):
+      os.symlink(os.readlink(source_full_path), target_full_path)
+      os.chmod(target_full_path, S_IMODE(st[ST_MODE]), follow_symlinks=False)
+    else:
+      # Ran into a FIFO, socket, etc. Should not happen in OP install dir.
+      # Ignore without copying for the time being; revisit later if needed.
+      cloudlog.error("can't copy this file type: %s" % source_full_path)
+    os.chown(target_full_path, st[ST_UID], st[ST_GID], follow_symlinks=False)
+
+  # Sync target mtimes to the cached lstat() value from each source object.
+  # Restores shared inode mtimes after linking, fixes symlinks and dirs.
+  os.utime(target_full_path, (st[ST_ATIME], st[ST_MTIME]), follow_symlinks=False)
+
+
+def finalize_from_ovfs_hardlink():
+  """Take the current OverlayFS merged view and finalize a copy outside of
+  OverlayFS, ready to be swapped-in at BASEDIR. Copy using hardlinks"""
+
+  cloudlog.info("creating finalized version of the overlay")
+
+  # The "copy" is done with hardlinks, but since the OverlayFS merge looks
+  # like a different filesystem, and hardlinks can't cross filesystems, we
+  # have to borrow a source pathname from the upper or lower layer.
+  inode_map = inodes_in_tree(BASEDIR)
+  inode_map.update(inodes_in_tree(OVERLAY_UPPER))
+
+  shutil.rmtree(FINALIZED)
+  os.umask(0o077)
+  os.mkdir(FINALIZED)
+  for root, dirs, files in os.walk(OVERLAY_MERGED, topdown=True):
+    for obj_name in dirs:
+      relative_path_name = os.path.relpath(os.path.join(root, obj_name), OVERLAY_MERGED)
+      dup_ovfs_object(inode_map, relative_path_name, FINALIZED)
+    for obj_name in files:
+      relative_path_name = os.path.relpath(os.path.join(root, obj_name), OVERLAY_MERGED)
+      dup_ovfs_object(inode_map, relative_path_name, FINALIZED)
+  cloudlog.info("done finalizing overlay")
+
+
+def finalize_from_ovfs_copy():
+  """Take the current OverlayFS merged view and finalize a copy outside of
+  OverlayFS, ready to be swapped-in at BASEDIR. Copy using shutil.copytree"""
+
+  cloudlog.info("creating finalized version of the overlay")
+  shutil.rmtree(FINALIZED)
+  shutil.copytree(OVERLAY_MERGED, FINALIZED, symlinks=True)
+  cloudlog.info("done finalizing overlay")
+
+
+def attempt_update():
+  cloudlog.info("attempting git update inside staging overlay")
+
+  git_fetch_output = run(NICE_LOW_PRIORITY + ["git", "fetch"], OVERLAY_MERGED)
+  cloudlog.info("git fetch success: %s", git_fetch_output)
+
+  cur_hash = run(["git", "rev-parse", "HEAD"], OVERLAY_MERGED).rstrip()
+  upstream_hash = run(["git", "rev-parse", "@{u}"], OVERLAY_MERGED).rstrip()
+  new_version = cur_hash != upstream_hash
+
+  git_fetch_result = len(git_fetch_output) > 0 and (git_fetch_output != "Failed to add the host to the list of known hosts (/data/data/com.termux/files/home/.ssh/known_hosts).\n")
+
+  cloudlog.info("comparing %s to %s" % (cur_hash, upstream_hash))
+  if new_version or git_fetch_result:
+    cloudlog.info("Running update")
+    if new_version:
+      cloudlog.info("git reset in progress")
+      r = [
+        run(NICE_LOW_PRIORITY + ["git", "reset", "--hard", "@{u}"], OVERLAY_MERGED),
+        run(NICE_LOW_PRIORITY + ["git", "clean", "-xdf"], OVERLAY_MERGED),
+        run(NICE_LOW_PRIORITY + ["git", "submodule", "init"], OVERLAY_MERGED),
+        run(NICE_LOW_PRIORITY + ["git", "submodule", "update"], OVERLAY_MERGED),
+      ]
+      cloudlog.info("git reset success: %s", '\n'.join(r))
+
+    # Un-set the validity flag to prevent the finalized tree from being
+    # activated later if the finalize step is interrupted
+    remove_consistent_flag()
+
+    finalize_from_ovfs_copy()
+
+    # Make sure the validity flag lands on disk LAST, only when the local git
+    # repo and OP install are in a consistent state.
+    set_consistent_flag()
+
+    cloudlog.info("update successful!")
+  else:
+    cloudlog.info("nothing new from git at this time")
+
+  set_update_available_params(new_version=new_version)
+
+
 def main(gctx=None):
+  overlay_init_done = False
+  wait_helper = WaitTimeHelper()
   params = Params()
 
+  if not os.geteuid() == 0:
+    raise RuntimeError("updated must be launched as root!")
+
+  # Set low io priority
+  p = psutil.Process()
+  if psutil.LINUX:
+    p.ionice(psutil.IOPRIO_CLASS_BE, value=7)
+
+  ov_lock_fd = open('/tmp/safe_staging_overlay.lock', 'w')
+  try:
+    fcntl.flock(ov_lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+  except IOError:
+    raise RuntimeError("couldn't get overlay lock; is another updated running?")
+
   while True:
     time_wrong = datetime.datetime.now().year < 2019
     ping_failed = subprocess.call(["ping", "-W", "4", "-c", "1", "8.8.8.8"])
-    if ping_failed or time_wrong:
-      time.sleep(60)
-      continue
-
-    # download application update
-    try:
-      r = subprocess.check_output(NICE_LOW_PRIORITY + ["git", "fetch"], stderr=subprocess.STDOUT).decode('utf8')
-    except subprocess.CalledProcessError as e:
-      cloudlog.event("git fetch failed",
-                     cmd=e.cmd,
-                     output=e.output,
-                     returncode=e.returncode)
-      time.sleep(60)
-      continue
-    cloudlog.info("git fetch success: %s", r)
-
-    # Write update available param
-    try:
-      cur_hash = subprocess.check_output(["git", "rev-parse", "HEAD"]).rstrip()
-      upstream_hash = subprocess.check_output(["git", "rev-parse", "@{u}"]).rstrip()
-      params.put("UpdateAvailable", str(int(cur_hash != upstream_hash)))
-    except:
-      params.put("UpdateAvailable", "0")
+    # Wait until we have a valid datetime to initialize the overlay
+    if not (ping_failed or time_wrong):
+      try:
+        # If the git directory has modifications after we created the overlay
+        # we need to recreate the overlay
+        if overlay_init_done:
+          overlay_init_fn = os.path.join(BASEDIR, ".overlay_init")
+          git_dir_path = os.path.join(BASEDIR, ".git")
+          new_files = run(["find", git_dir_path, "-newer", overlay_init_fn])
 
-    # Write latest release notes to param
-    try:
-      r = subprocess.check_output(["git", "--no-pager", "show", "@{u}:RELEASES.md"])
-      r = r[:r.find(b'\n\n')]  # Slice latest release notes
-      params.put("ReleaseNotes", r + b"\n")
-    except:
-      params.put("ReleaseNotes", "")
+          if len(new_files.splitlines()):
+            cloudlog.info(".git directory changed, recreating overlay")
+            overlay_init_done = False
+
+        if not overlay_init_done:
+          init_ovfs()
+          overlay_init_done = True
+
+        if params.get("IsOffroad") == b"1":
+          attempt_update()
+        else:
+          cloudlog.info("not running updater, openpilot running")
+
+      except subprocess.CalledProcessError as e:
+        cloudlog.event(
+          "update process failed",
+          cmd=e.cmd,
+          output=e.output,
+          returncode=e.returncode
+        )
+        overlay_init_done = False
+      except Exception:
+        cloudlog.exception("uncaught updated exception, shouldn't happen")
+        overlay_init_done = False
 
-    t = datetime.datetime.now().isoformat()
-    params.put("LastUpdateTime", t.encode('utf8'))
+    wait_between_updates(wait_helper.ready_event)
+    if wait_helper.shutdown:
+      break
 
-    time.sleep(60*60)
+  # We've been signaled to shut down
+  dismount_ovfs()
+
 
 if __name__ == "__main__":
   main()
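The header comment of updated.py describes the boot-time swap: launch_chffrplus.sh only activates the finalized tree when $FINALIZED/.overlay_consistent exists and nothing under BASEDIR is newer than $BASEDIR/.overlay_init. A rough sketch of that gate (this is not the actual launch script; the top-level-only mtime scan is a simplification and the paths are the defaults used elsewhere in this diff):

# Rough sketch of the activation check described in updated.py's header comment.
import os

BASEDIR = "/data/openpilot"
FINALIZED = "/data/safe_staging/finalized"

def update_ready():
    consistent = os.path.isfile(os.path.join(FINALIZED, ".overlay_consistent"))
    init_file = os.path.join(BASEDIR, ".overlay_init")
    if not (consistent and os.path.isfile(init_file)):
        return False
    # Treat newer mtimes under BASEDIR as local development -> skip activation.
    init_mtime = os.path.getmtime(init_file)
    basedir_mtime = max(os.path.getmtime(os.path.join(BASEDIR, p)) for p in os.listdir(BASEDIR))
    return basedir_mtime <= init_mtime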
Some files were not shown because too many files have changed in this diff.