pandad/SPI: ensure slave is in a consistent state (#32645)

* maxout

* get ready for the next one

* really get ready

* much better

---------

Co-authored-by: Comma Device <device@comma.ai>
Adeeb Shihadeh, committed via GitHub (11 months ago)
commit f8cb04e4a8 (parent 71063d66cd)
3 changed files:
  selfdrive/pandad/panda.cc (5 lines)
  selfdrive/pandad/spi.cc (45 lines)
  selfdrive/pandad/tests/test_pandad_spi.py (2 lines)
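
The core change: every error path in spi_transfer() now funnels into a recovery routine that clocks dummy bytes (0x14) until the slave NACKs three times in a row, i.e. until the slave's protocol state machine is demonstrably back at idle and ready for the next transfer. A minimal standalone sketch of that idea; the transfer_byte callback and the NACK byte value are illustrative assumptions, not the real pandad API:

#include <cstdint>
#include <functional>

// Illustrative byte values: 0x14 is the dummy byte from this diff, the
// NACK value is an assumption about the firmware's protocol constants.
constexpr uint8_t DUMMY_BYTE = 0x14;
constexpr uint8_t NACK_BYTE  = 0x1f;

// Clock dummy bytes until the slave NACKs three times in a row. A slave
// stuck mid-transfer first flushes whatever partial state it holds; an
// idle slave NACKs anything that doesn't start a valid header. Three
// consecutive NACKs therefore mean "idle", not "momentarily confused".
void drain_until_idle(const std::function<uint8_t(uint8_t)> &transfer_byte) {
  int nack_cnt = 0;
  while (nack_cnt < 3) {
    nack_cnt = (transfer_byte(DUMMY_BYTE) == NACK_BYTE) ? nack_cnt + 1 : 0;
  }
}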

selfdrive/pandad/panda.cc
@@ -221,6 +221,11 @@ bool Panda::can_receive(std::vector<can_frame>& out_vec) {
     return false;
   }
 
+  if (getenv("PANDAD_MAXOUT") != NULL) {
+    static uint8_t junk[RECV_SIZE];
+    handle->bulk_read(0xab, junk, RECV_SIZE - recv);
+  }
+
   bool ret = true;
   if (recv > 0) {
     receive_buffer_size += recv;
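
The PANDAD_MAXOUT hook above is a stress knob for the tests: the real endpoint read returns recv bytes, and the extra junk read of RECV_SIZE - recv bytes from the unused 0xab endpoint tops every cycle up to exactly RECV_SIZE bytes on the wire, so the link runs at worst-case utilization while the recovery path is exercised.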

@ -50,8 +50,9 @@ private:
#define SPILOG(fn, fmt, ...) do { \ #define SPILOG(fn, fmt, ...) do { \
fn(fmt, ## __VA_ARGS__); \ fn(fmt, ## __VA_ARGS__); \
fn(" %d / 0x%x / %d / %d", \ fn(" %d / 0x%x / %d / %d / tx: %s", \
xfer_count, header.endpoint, header.tx_len, header.max_rx_len); \ xfer_count, header.endpoint, header.tx_len, header.max_rx_len, \
util::hexdump(tx_buf, std::min((int)header.tx_len, 8)).c_str()); \
} while (0) } while (0)
PandaSpiHandle::PandaSpiHandle(std::string serial) : PandaCommsHandle(serial) { PandaSpiHandle::PandaSpiHandle(std::string serial) : PandaCommsHandle(serial) {
@@ -238,6 +239,7 @@ int PandaSpiHandle::spi_transfer_retry(uint8_t endpoint, uint8_t *tx_data, uint1
       // due to full TX buffers
       nack_count += 1;
       if (nack_count > 3) {
+        SPILOG(LOGE, "NACK sleep %d", nack_count);
         usleep(std::clamp(nack_count*10, 200, 2000));
       }
     }
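
With the new log line, NACK storms become visible instead of silent. The backoff itself is unchanged: usleep(std::clamp(nack_count*10, 200, 2000)) sleeps between 200 µs and 2 ms, and since nack_count*10 only exceeds the 200 µs floor once nack_count passes 20, early retries all wait the minimum.
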
@@ -256,14 +258,14 @@ int PandaSpiHandle::wait_for_ack(uint8_t ack, uint8_t tx, unsigned int timeout,
   if (timeout == 0) {
     timeout = SPI_ACK_TIMEOUT;
   }
-  timeout = std::clamp(timeout, 100U, SPI_ACK_TIMEOUT);
+  timeout = std::clamp(timeout, 20U, SPI_ACK_TIMEOUT);
 
   spi_ioc_transfer transfer = {
     .tx_buf = (uint64_t)tx_buf,
     .rx_buf = (uint64_t)rx_buf,
-    .len = length
+    .len = length,
   };
-  tx_buf[0] = tx;
+  memset(tx_buf, tx, length);
 
   while (true) {
     int ret = lltransfer(transfer);
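
Two details here matter for the fail path added later in this diff. memset(tx_buf, tx, length) fills the whole polling transfer with the poll byte instead of only tx_buf[0], so a long drain transfer sends a uniform pattern rather than one marker byte followed by stale buffer contents. And lowering the clamp floor from 100U to 20U lets short recovery timeouts actually be short; a sketch of the clamp arithmetic, where the SPI_ACK_TIMEOUT value is a placeholder (the real constant lives in spi.cc):

#include <algorithm>
#include <cstdio>

int main() {
  const unsigned SPI_ACK_TIMEOUT = 500;  // placeholder value for this demo
  for (unsigned requested : {1u, 50u, 10000u}) {
    // Same expression as wait_for_ack(); the floor was 100U before this commit.
    unsigned effective = std::clamp(requested, 20u, SPI_ACK_TIMEOUT);
    printf("requested %5u ms -> effective %3u ms\n", requested, effective);
  }
  // The fail path's 1 ms timeout becomes 20 ms; under the old floor each
  // recovery poll would have been willing to wait at least 100 ms.
  return 0;
}
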
@@ -275,13 +277,13 @@ int PandaSpiHandle::spi_transfer(uint8_t endpoint, uint8_t *tx_data, uint16_t tx
     if (rx_buf[0] == ack) {
       break;
     } else if (rx_buf[0] == SPI_NACK) {
-      SPILOG(LOGD, "SPI: got NACK");
+      SPILOG(LOGD, "SPI: got NACK, waiting for 0x%x", ack);
       return SpiError::NACK;
     }
 
     // handle timeout
     if (millis_since_boot() - start_millis > timeout) {
-      SPILOG(LOGW, "SPI: timed out waiting for ACK");
+      SPILOG(LOGW, "SPI: timed out waiting for ACK, waiting for 0x%x", ack);
       return SpiError::ACK_TIMEOUT;
     }
   }
@@ -352,13 +354,13 @@ int PandaSpiHandle::spi_transfer(uint8_t endpoint, uint8_t *tx_data, uint16_t tx
   ret = lltransfer(transfer);
   if (ret < 0) {
     SPILOG(LOGE, "SPI: failed to send header");
-    return ret;
+    goto fail;
   }
 
   // Wait for (N)ACK
   ret = wait_for_ack(SPI_HACK, 0x11, timeout, 1);
   if (ret < 0) {
-    return ret;
+    goto fail;
   }
 
   // Send data
@@ -370,20 +372,20 @@ int PandaSpiHandle::spi_transfer(uint8_t endpoint, uint8_t *tx_data, uint16_t tx
   ret = lltransfer(transfer);
   if (ret < 0) {
     SPILOG(LOGE, "SPI: failed to send data");
-    return ret;
+    goto fail;
   }
 
   // Wait for (N)ACK
   ret = wait_for_ack(SPI_DACK, 0x13, timeout, 3);
   if (ret < 0) {
-    return ret;
+    goto fail;
   }
 
   // Read data
   rx_data_len = *(uint16_t *)(rx_buf+1);
   if (rx_data_len >= SPI_BUF_SIZE) {
     SPILOG(LOGE, "SPI: RX data len larger than buf size %d", rx_data_len);
-    return -1;
+    goto fail;
   }
 
   transfer.len = rx_data_len + 1;
@@ -391,11 +393,11 @@ int PandaSpiHandle::spi_transfer(uint8_t endpoint, uint8_t *tx_data, uint16_t tx
   ret = lltransfer(transfer);
   if (ret < 0) {
     SPILOG(LOGE, "SPI: failed to read rx data");
-    return ret;
+    goto fail;
   }
   if (!check_checksum(rx_buf, rx_data_len + 4)) {
     SPILOG(LOGE, "SPI: bad checksum");
-    return -1;
+    goto fail;
   }
 
   if (rx_data != NULL) {
@@ -403,5 +405,20 @@ int PandaSpiHandle::spi_transfer(uint8_t endpoint, uint8_t *tx_data, uint16_t tx
   }
 
   return rx_data_len;
+
+fail:
+  // ensure slave is in a consistent state
+  // and ready for the next transfer
+  int nack_cnt = 0;
+  while (nack_cnt < 3) {
+    if (wait_for_ack(SPI_NACK, 0x14, 1, SPI_BUF_SIZE/2) == 0) {
+      nack_cnt += 1;
+    } else {
+      nack_cnt = 0;
+    }
+  }
+
+  if (ret > 0) ret = -1;
+  return ret;
 }
 #endif
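
Taken together, the spi_transfer() edits replace the early returns with a single exit point: the fail label drains the slave back to idle before reporting the error, and the final "if (ret > 0) ret = -1" ensures a partially completed transfer is never reported as success. A distilled, compilable sketch of the resulting control flow, with stand-ins for the real SPI calls:

#include <cstdio>

// Stand-ins for the real SPI operations inside spi_transfer().
static int step(const char *name, bool ok) {
  return ok ? 0 : (printf("SPI: %s failed\n", name), -1);
}
static void drain_slave() { /* clock dummy bytes until 3 consecutive NACKs */ }

int spi_transfer_sketch() {
  int ret;
  if ((ret = step("send header", true)) < 0) goto fail;
  if ((ret = step("header (N)ACK", true)) < 0) goto fail;
  if ((ret = step("send data", true)) < 0) goto fail;
  if ((ret = step("data (N)ACK + read", true)) < 0) goto fail;
  return ret;  // the real function returns rx_data_len here

fail:
  drain_slave();                // put the slave back in a known state
  return (ret > 0) ? -1 : ret;  // never report a partial transfer as success
}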

selfdrive/pandad/tests/test_pandad_spi.py
@@ -97,7 +97,7 @@ class TestBoarddSpi:
       with subtests.test(msg="timing check", service=service):
         edt = 1e3 / SERVICE_LIST[service].frequency
         assert edt*0.9 < np.mean(dts) < edt*1.1
-        assert np.max(dts) < edt*20
+        assert np.max(dts) < edt*8
         assert np.min(dts) < edt
         assert len(dts) >= ((et-0.5)*SERVICE_LIST[service].frequency*0.8)
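
Tightening the jitter bound from edt*20 to edt*8 is the test-level payoff: for a 100 Hz service, edt = 10 ms, so the worst allowed gap between messages drops from 200 ms to 80 ms, a bound that only passes reliably once a failed transfer no longer wedges the link.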
