agnos updater (#2600)
* agnos updater
* add manifest
* fix path
* get manifest from overlay
* update manifest
* remove merge markers
* add streaming decompressor
* dont need read all
* Unsparsify
* Fix output filename
* Optimization
* cleanup
* Small cleanup
* Read manifest from merged overlay
* Write hash at end of partition
* Sync before writing hash
* Write bytes in file
* add manifest with image sizes
* Fix manifest path
* File was closed already
* Format string
* Put raw hash
* Read hashes in launch script
* update launch script
* should be agnos version
* fix slot
* Make sure we clear the hash
* Verify partition size
* move updated
* Standalone flasher
* Don't rely on ordering
* Get path
* Debug log
* Download agnos
* Info is enough
* update manifest
* Remove f
* Check downloader return code
* Exit on wrong manifest
* Fix typos
* Set pythonpath before hardware init
* move agnos into hardware folder
* remove comments
* Fix abstractmethod

Co-authored-by: Comma Device <device@comma.ai>
Co-authored-by: Willem Melching <willem.melching@gmail.com>
parent 8039361567
commit b276881fcd
11 changed files with 272 additions and 15 deletions
@@ -0,0 +1,18 @@
[
  {
    "name": "system",
    "url": "https://commadist.azureedge.net/agnosupdate-staging/system-1e5748c84d9b48bbe599c88da63e46b7ed1c0f1245486cfbcfb05399bd16dbb6.img.xz",
    "hash": "de6afb8651dc011fed8a883bac191e61d4b58e2e1c84c2717816b64f71afd3b1",
    "hash_raw": "1e5748c84d9b48bbe599c88da63e46b7ed1c0f1245486cfbcfb05399bd16dbb6",
    "size": 10737418240,
    "sparse": true
  },
  {
    "name": "boot",
    "url": "https://commadist.azureedge.net/agnosupdate-staging/boot-21649cb35935a92776155e7bb23e437e7e7eb0500c082df89f59a6d4a4ed639a.img.xz",
    "hash": "21649cb35935a92776155e7bb23e437e7e7eb0500c082df89f59a6d4a4ed639a",
    "hash_raw": "21649cb35935a92776155e7bb23e437e7e7eb0500c082df89f59a6d4a4ed639a",
    "size": 14678016,
    "sparse": false
  }
]
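The manifest above drives the flasher below: "hash" is the sha256 of the decompressed image exactly as downloaded, "hash_raw" is the sha256 of the bytes after sparse expansion (for a non-sparse image like boot the two are identical), "size" is the byte count written to the partition, and "sparse" selects the unsparsify path. A minimal sketch of how the "hash" field could be reproduced from a local image (compressed_image_hash is an illustrative helper, not part of this PR; the real manifest comes from the build pipeline):

import hashlib
import lzma

def compressed_image_hash(path: str) -> str:
  # sha256 over the decompressed stream, matching the check against
  # StreamingDecompressor.sha256 in agnos.py below
  h = hashlib.sha256()
  with lzma.open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1024 * 1024), b""):
      h.update(chunk)
  return h.hexdigest()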
@@ -0,0 +1,149 @@
#!/usr/bin/env python3
import json
import lzma
import hashlib
import requests
import struct
import subprocess
import os

from common.spinner import Spinner


# Streams a compressed image over HTTP, decompressing it on the fly and
# hashing the decompressed bytes as they are read
class StreamingDecompressor:
  def __init__(self, url):
    self.buf = b""

    self.req = requests.get(url, stream=True, headers={'Accept-Encoding': None})
    self.it = self.req.iter_content(chunk_size=1024 * 1024)
    self.decompressor = lzma.LZMADecompressor(format=lzma.FORMAT_AUTO)
    self.eof = False
    self.sha256 = hashlib.sha256()

  def read(self, length):
    while len(self.buf) < length:
      self.req.raise_for_status()

      try:
        compressed = next(self.it)
      except StopIteration:
        self.eof = True
        break
      out = self.decompressor.decompress(compressed)
      self.buf += out

    result = self.buf[:length]
    self.buf = self.buf[length:]

    self.sha256.update(result)
    return result


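# Layout notes for unsparsify() below (standard Android sparse image format):
# a 28-byte file header (u32 magic 0xed26ff3a, u16 major/minor version,
# u16 file header size, u16 chunk header size, u32 block size, u32 total
# blocks, u32 total chunks, u32 checksum) is followed by chunks, each with a
# 12-byte header (u16 type, u16 reserved, u32 size in blocks, u32 total size
# in bytes) that expands to raw data, a repeated 4-byte fill pattern, or
# skipped "don't care" blocks.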
def unsparsify(f):
  magic = struct.unpack("I", f.read(4))[0]
  assert(magic == 0xed26ff3a)

  # Version
  major = struct.unpack("H", f.read(2))[0]
  minor = struct.unpack("H", f.read(2))[0]
  assert(major == 1 and minor == 0)

  # Header sizes
  _ = struct.unpack("H", f.read(2))[0]  # file header size
  _ = struct.unpack("H", f.read(2))[0]  # chunk header size

  block_sz = struct.unpack("I", f.read(4))[0]
  _ = struct.unpack("I", f.read(4))[0]  # total blocks
  num_chunks = struct.unpack("I", f.read(4))[0]
  _ = struct.unpack("I", f.read(4))[0]  # checksum

  for _ in range(num_chunks):
    chunk_type = struct.unpack("H", f.read(2))[0]
    _ = struct.unpack("H", f.read(2))[0]  # reserved
    out_blocks = struct.unpack("I", f.read(4))[0]
    _ = struct.unpack("I", f.read(4))[0]  # total chunk size in bytes

    if chunk_type == 0xcac1:  # Raw
      # TODO: yield in smaller chunks. Yielding only block_sz is too slow. Largest observed data chunk is 252 MB.
      yield f.read(out_blocks * block_sz)
    elif chunk_type == 0xcac2:  # Fill
      filler = f.read(4) * (block_sz // 4)
      for _ in range(out_blocks):
        yield filler
    elif chunk_type == 0xcac3:  # Don't care
      yield b""
    else:
      raise Exception("Unhandled sparse chunk type")


def flash_agnos_update(manifest_path, cloudlog, spinner=None):
  update = json.load(open(manifest_path))

  current_slot = subprocess.check_output(["abctl", "--boot_slot"], encoding='utf-8').strip()
  target_slot = "_b" if current_slot == "_a" else "_a"
  target_slot_number = "0" if target_slot == "_a" else "1"

  cloudlog.info(f"Current slot {current_slot}, target slot {target_slot}")

  # set target slot as unbootable
  os.system(f"abctl --set_unbootable {target_slot_number}")

  for partition in update:
    cloudlog.info(f"Downloading and writing {partition['name']}")

    downloader = StreamingDecompressor(partition['url'])
    with open(f"/dev/disk/by-partlabel/{partition['name']}{target_slot}", 'wb') as out:
      partition_size = partition['size']
      # Clear hash before flashing
      out.seek(partition_size)
      out.write(b"\x00" * 64)
      out.seek(0)
      os.sync()

      # Flash partition
      if partition['sparse']:
        raw_hash = hashlib.sha256()
        for chunk in unsparsify(downloader):
          raw_hash.update(chunk)
          out.write(chunk)

          if spinner is not None:
            spinner.update_progress(out.tell(), partition_size)

        if raw_hash.hexdigest().lower() != partition['hash_raw'].lower():
          raise Exception(f"Unsparse hash mismatch '{raw_hash.hexdigest().lower()}'")
      else:
        while not downloader.eof:
          out.write(downloader.read(1024 * 1024))

          if spinner is not None:
            spinner.update_progress(out.tell(), partition_size)

      if downloader.sha256.hexdigest().lower() != partition['hash'].lower():
        raise Exception("Uncompressed hash mismatch")

      if out.tell() != partition['size']:
        raise Exception("Uncompressed size mismatch")

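      # Note on the scheme (per the commit message: "Write hash at end of
      # partition", "Read hashes in launch script"): the 64 hex bytes written
      # below land at offset partition['size'], just past the image data, and
      # were zeroed out before flashing started; the launch script reads them
      # back to tell whether the slot holds a fully flashed image.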
      # Write hash after successful flash
      os.sync()
      out.write(partition['hash_raw'].lower().encode())

  cloudlog.info(f"AGNOS ready on slot {target_slot}")


if __name__ == "__main__":
  import logging
  import time
  import sys

  if len(sys.argv) != 2:
    print("Usage: ./agnos.py <manifest.json>")
    exit(1)

  spinner = Spinner()
  spinner.update("Updating AGNOS")
  time.sleep(5)

  logging.basicConfig(level=logging.INFO)
  flash_agnos_update(sys.argv[1], logging, spinner)
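
A minimal sketch of the slot check implied by the hash-at-end scheme (check_partition_hash and slot_is_valid are hypothetical helpers, not part of this diff; they assume the partition label naming and the 64-byte trailing hash that flash_agnos_update writes above):

import json

def check_partition_hash(name: str, slot: str, size: int, hash_raw: str) -> bool:
  # flash_agnos_update writes the lowercase hex sha256 of the raw image
  # (64 ASCII bytes) at offset `size`, immediately past the image data
  with open(f"/dev/disk/by-partlabel/{name}{slot}", "rb") as part:
    part.seek(size)
    return part.read(64) == hash_raw.lower().encode()

def slot_is_valid(manifest_path: str, slot: str) -> bool:
  with open(manifest_path) as f:
    manifest = json.load(f)
  return all(check_partition_hash(p['name'], slot, p['size'], p['hash_raw']) for p in manifest)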