Fixes for PYTHONWARNINGS=error (#29381)

old-commit-hash: db287496d8
beeps
Authored by Adeeb Shihadeh 2 years ago; committed by GitHub
parent 01beb57506
commit f50fedb647
  1. common/spinner.py (8 changes)
  2. selfdrive/boardd/tests/test_boardd_loopback.py (7 changes)
  3. selfdrive/statsd.py (138 changes)
  4. system/loggerd/tests/test_loggerd.py (3 changes)
  5. tools/bodyteleop/web.py (7 changes)

@@ -29,11 +29,11 @@ class Spinner():
def close(self):
if self.spinner_proc is not None:
self.spinner_proc.kill()
try:
self.spinner_proc.stdin.close()
except BrokenPipeError:
pass
self.spinner_proc.terminate()
self.spinner_proc.communicate(timeout=2.)
except subprocess.TimeoutExpired:
print("WARNING: failed to kill spinner")
self.spinner_proc = None
def __del__(self):

@@ -10,7 +10,6 @@ from pprint import pprint
import cereal.messaging as messaging
from cereal import car, log
from common.params import Params
from common.spinner import Spinner
from common.timeout import Timeout
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car import make_can_msg
@@ -24,11 +23,6 @@ class TestBoardd(unittest.TestCase):
def setUpClass(cls):
os.environ['STARTED'] = '1'
os.environ['BOARDD_LOOPBACK'] = '1'
cls.spinner = Spinner()
@classmethod
def tearDownClass(cls):
cls.spinner.close()
@phone_only
@with_processes(['pandad'])
@@ -67,7 +61,6 @@ class TestBoardd(unittest.TestCase):
n = 200
for i in range(n):
print(f"boardd loopback {i}/{n}")
self.spinner.update(f"boardd loopback {i}/{n}")
sent_msgs = defaultdict(set)
for _ in range(random.randrange(20, 100)):

@@ -23,6 +23,8 @@ class METRIC_TYPE:
class StatLog:
def __init__(self):
self.pid = None
self.zctx = None
self.sock = None
def connect(self) -> None:
self.zctx = zmq.Context()
@@ -31,6 +33,12 @@ class StatLog:
self.sock.connect(STATS_SOCKET)
self.pid = os.getpid()
def __del__(self):
if self.sock is not None:
self.sock.close()
if self.zctx is not None:
self.zctx.term()
def _send(self, metric: str) -> None:
if os.getpid() != self.pid:
self.connect()
@@ -68,7 +76,7 @@ def main() -> NoReturn:
return res
# open statistics socket
ctx = zmq.Context().instance()
ctx = zmq.Context.instance()
sock = ctx.socket(zmq.PULL)
sock.bind(STATS_SOCKET)
@@ -92,70 +100,74 @@ def main() -> NoReturn:
last_flush_time = time.monotonic()
gauges = {}
samples: Dict[str, List[float]] = defaultdict(list)
while True:
started_prev = sm['deviceState'].started
sm.update()
# Update metrics
try:
while True:
try:
metric = sock.recv_string(zmq.NOBLOCK)
started_prev = sm['deviceState'].started
sm.update()
# Update metrics
while True:
try:
metric_type = metric.split('|')[1]
metric_name = metric.split(':')[0]
metric_value = float(metric.split('|')[0].split(':')[1])
if metric_type == METRIC_TYPE.GAUGE:
gauges[metric_name] = metric_value
elif metric_type == METRIC_TYPE.SAMPLE:
samples[metric_name].append(metric_value)
else:
cloudlog.event("unknown metric type", metric_type=metric_type)
except Exception:
cloudlog.event("malformed metric", metric=metric)
except zmq.error.Again:
break
# flush when started state changes or after FLUSH_TIME_S
if (time.monotonic() > last_flush_time + STATS_FLUSH_TIME_S) or (sm['deviceState'].started != started_prev):
result = ""
current_time = datetime.utcnow().replace(tzinfo=timezone.utc)
tags['started'] = sm['deviceState'].started
for key, value in gauges.items():
result += get_influxdb_line(f"gauge.{key}", value, current_time, tags)
for key, values in samples.items():
values.sort()
sample_count = len(values)
sample_sum = sum(values)
stats = {
'count': sample_count,
'min': values[0],
'max': values[-1],
'mean': sample_sum / sample_count,
}
for percentile in [0.05, 0.5, 0.95]:
value = values[int(round(percentile * (sample_count - 1)))]
stats[f"p{int(percentile * 100)}"] = value
result += get_influxdb_line(f"sample.{key}", stats, current_time, tags)
# clear intermediate data
gauges.clear()
samples.clear()
last_flush_time = time.monotonic()
# check that we aren't filling up the drive
if len(os.listdir(STATS_DIR)) < STATS_DIR_FILE_LIMIT:
if len(result) > 0:
stats_path = os.path.join(STATS_DIR, f"{current_time.timestamp():.0f}_{idx}")
with atomic_write_in_dir(stats_path) as f:
f.write(result)
idx += 1
else:
cloudlog.error("stats dir full")
metric = sock.recv_string(zmq.NOBLOCK)
try:
metric_type = metric.split('|')[1]
metric_name = metric.split(':')[0]
metric_value = float(metric.split('|')[0].split(':')[1])
if metric_type == METRIC_TYPE.GAUGE:
gauges[metric_name] = metric_value
elif metric_type == METRIC_TYPE.SAMPLE:
samples[metric_name].append(metric_value)
else:
cloudlog.event("unknown metric type", metric_type=metric_type)
except Exception:
cloudlog.event("malformed metric", metric=metric)
except zmq.error.Again:
break
# flush when started state changes or after FLUSH_TIME_S
if (time.monotonic() > last_flush_time + STATS_FLUSH_TIME_S) or (sm['deviceState'].started != started_prev):
result = ""
current_time = datetime.utcnow().replace(tzinfo=timezone.utc)
tags['started'] = sm['deviceState'].started
for key, value in gauges.items():
result += get_influxdb_line(f"gauge.{key}", value, current_time, tags)
for key, values in samples.items():
values.sort()
sample_count = len(values)
sample_sum = sum(values)
stats = {
'count': sample_count,
'min': values[0],
'max': values[-1],
'mean': sample_sum / sample_count,
}
for percentile in [0.05, 0.5, 0.95]:
value = values[int(round(percentile * (sample_count - 1)))]
stats[f"p{int(percentile * 100)}"] = value
result += get_influxdb_line(f"sample.{key}", stats, current_time, tags)
# clear intermediate data
gauges.clear()
samples.clear()
last_flush_time = time.monotonic()
# check that we aren't filling up the drive
if len(os.listdir(STATS_DIR)) < STATS_DIR_FILE_LIMIT:
if len(result) > 0:
stats_path = os.path.join(STATS_DIR, f"{current_time.timestamp():.0f}_{idx}")
with atomic_write_in_dir(stats_path) as f:
f.write(result)
idx += 1
else:
cloudlog.error("stats dir full")
finally:
sock.close()
ctx.term()
if __name__ == "__main__":

@@ -210,7 +210,8 @@ class TestLoggerd(unittest.TestCase):
for fn in ["console-ramoops", "pmsg-ramoops-0"]:
path = Path(os.path.join("/sys/fs/pstore/", fn))
if path.is_file():
expected_val = open(path, "rb").read()
with open(path, "rb") as f:
expected_val = f.read()
bootlog_val = [e.value for e in boot.pstore.entries if e.key == fn][0]
self.assertEqual(expected_val, bootlog_val)

@@ -6,11 +6,16 @@ import ssl
import uuid
import time
from common.basedir import BASEDIR
# aiortc and its dependencies have lots of internal warnings :(
# Clear any inherited filters (e.g. PYTHONWARNINGS=error) and show warnings
# instead of raising, so importing aiortc below does not crash the process.
# NOTE(review): this changes warning behavior process-wide, not just for aiortc.
import warnings
warnings.resetwarnings()
warnings.simplefilter("always")
from aiohttp import web
from aiortc import RTCPeerConnection, RTCSessionDescription
import cereal.messaging as messaging
from common.basedir import BASEDIR
from tools.bodyteleop.bodyav import BodyMic, WebClientSpeaker, force_codec, play_sound, MediaBlackhole, EncodedBodyVideo
logger = logging.getLogger("pc")

Loading…
Cancel
Save