/* RX FIFO fill level at which the RX-threshold interrupt fires. */
#define FIFO_RX_W_MARK (6)
/* TX FIFO fill level at which the TX-threshold interrupt fires. */
#define FIFO_TX_W_MARK (3)

/*
 * Convert the DFS (data frame size) field of a CTRL register value to the
 * number of bytes needed per frame: DFS values up to 7 (<= 8-bit frames)
 * yield 1 byte, values up to 15 (<= 16-bit frames) yield 2 bytes.
 *
 * NOTE(review): the trailing ">> 3) + 1" was truncated in the mangled
 * listing and is reconstructed from the upstream QMSI driver — verify.
 * The macro argument is now parenthesized for expansion safety.
 */
#define BYTES_PER_FRAME(reg_data)                                              \
	(((((reg_data)&QM_SS_SPI_CTRL_DFS_MASK) >> QM_SS_SPI_CTRL_DFS_OFFS) >>\
	  3) +                                                                 \
	 1)
15 static uint32_t base[QM_SS_SPI_NUM] = {QM_SS_SPI_0_BASE, QM_SS_SPI_1_BASE};
18 static uint32_t rx_c[QM_SS_SPI_NUM];
19 static uint32_t tx_c[QM_SS_SPI_NUM];
21 static const uint16_t dummy_frame = 0;
/*
 * NOTE(review): fragment of a static helper whose signature was lost in
 * extraction. From what is visible it masks all controller interrupts and
 * then clears any pending ones — presumably the disable/cleanup path.
 */
29 QM_SS_SPI_INTERRUPT_MASK_WRITE(0, base[spi]);
31 QM_SS_SPI_INTERRUPT_CLEAR_WRITE(QM_SS_SPI_INTR_ALL, base[spi]);
34 static __inline__
void fifo_write(
const qm_ss_spi_t spi,
const void *data,
40 dr = *(uint8_t *)data;
41 }
else if (size == 2) {
42 dr = *(uint16_t *)data;
44 dr = *(uint32_t *)data;
46 dr |= QM_SS_SPI_DR_W_MASK;
51 static __inline__
void fifo_read(
const qm_ss_spi_t spi,
void *data,
54 QM_SS_SPI_DUMMY_WRITE(base[spi]);
56 *(uint8_t *)data = __builtin_arc_lr(base[spi] +
QM_SS_SPI_DR);
57 }
else if (size == 2) {
58 *(uint16_t *)data = __builtin_arc_lr(base[spi] +
QM_SS_SPI_DR);
60 *(uint32_t *)data = __builtin_arc_lr(base[spi] +
QM_SS_SPI_DR);
/*
 * Fragments of qm_ss_spi_set_config (signature and several interior lines
 * lost in extraction). Validates arguments, then programs frame size,
 * transfer mode and bus mode into CTRL and sets the baud-rate divider.
 */
68 QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
69 QM_CHECK(cfg, -EINVAL);
/* NOTE(review): condition head missing — presumably rejects
 * reconfiguration while the controller is enabled (SPIEN set); verify. */
75 QM_SS_SPI_SPIEN_EN)) {
80 QM_SS_SPI_ENABLE_REG_WRITES(base[spi]);
/* Read-modify-write CTRL: clear the DFS/TMOD/BMOD fields, then refill
 * them from the caller-supplied configuration. */
82 ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
83 ctrl &= ~(QM_SS_SPI_CTRL_DFS_MASK | QM_SS_SPI_CTRL_TMOD_MASK |
84 QM_SS_SPI_CTRL_BMOD_MASK);
85 ctrl |= cfg->
frame_size << QM_SS_SPI_CTRL_DFS_OFFS;
87 ctrl |= cfg->
bus_mode << QM_SS_SPI_CTRL_BMOD_OFFS;
89 QM_SS_SPI_CTRL_WRITE(ctrl, base[spi]);
/* Program the serial clock divider: SCK = SPI_clock / clk_divider. */
91 QM_SS_SPI_BAUD_RATE_WRITE(cfg->
clk_divider, base[spi]);
/*
 * Fragments of qm_ss_spi_slave_select (signature lost in extraction).
 * Refuses to change the slave-select lines while the controller status
 * register reports BUSY (presumably returning -EBUSY — verify), then
 * writes the slave-enable register.
 */
99 QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
102 if (__builtin_arc_lr(base[spi] +
QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY) {
106 QM_SS_SPI_SER_WRITE(ss, base[spi]);
/*
 * Fragments of qm_ss_spi_get_status (signature lost in extraction).
 * Validates arguments, then inspects the BUSY bit of the status register
 * — presumably to report QM_SS_SPI_BUSY vs idle via *status; verify.
 */
114 QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
115 QM_CHECK(status, -EINVAL);
117 if (__builtin_arc_lr(base[spi] +
QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY) {
/*
 * Fragments of the blocking qm_ss_spi_transfer (signature and many
 * interior lines lost in extraction). Polls SR flags, writing TX frames
 * while the TX FIFO is not full and reading RX frames while the RX FIFO
 * is not empty, until both counters are exhausted.
 */
130 QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
131 QM_CHECK(xfer, -EINVAL);
/* Transfer mode (TX/RX/both/EEPROM-read) is decoded from CTRL. */
133 uint32_t ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
134 uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
135 QM_SS_SPI_CTRL_TMOD_OFFS);
146 uint32_t tx_cnt = xfer->
tx_len;
147 uint32_t rx_cnt = xfer->
rx_len;
148 uint8_t *rx_buffer = xfer->
rx;
149 uint8_t *tx_buffer = xfer->
tx;
153 uint8_t bytes = BYTES_PER_FRAME(ctrl);
/* Mask all interrupts: this path is purely polled. */
156 QM_SS_SPI_INTERRUPT_MASK_WRITE(0, base[spi]);
/* NDF is the number of RX data frames minus one (hardware convention). */
160 QM_SS_SPI_NDF_WRITE((xfer->
rx_len - 1), base[spi]);
/* RX-only transfer: clock out the constant dummy frame as TX data. */
165 tx_buffer = (uint8_t *)&dummy_frame;
172 while (tx_cnt || rx_cnt) {
/* NOTE(review): RX-overflow abort branch body missing — verify. */
175 if (QM_SS_SPI_INTERRUPT_STATUS_READ(base[spi]) &
176 QM_SS_SPI_INTR_RXOI) {
/* RX FIFO not empty and frames still expected: drain one frame. */
184 if (sr & QM_SS_SPI_SR_RFNE && rx_cnt) {
185 fifo_read(spi, rx_buffer, bytes);
/* TX FIFO not full and frames still pending: push one frame. */
190 if (sr & QM_SS_SPI_SR_TFNF && tx_cnt) {
191 fifo_write(spi, tx_buffer, bytes);
/* Spin until the controller deasserts BUSY before returning. */
197 while (__builtin_arc_lr(base[spi] +
QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY)
/*
 * Fragments of qm_ss_spi_irq_transfer (signature and interior lines lost
 * in extraction). Records the transfer descriptor, programs FIFO
 * threshold levels, unmasks interrupts and primes the first frame.
 */
209 QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
210 QM_CHECK(xfer, -EINVAL);
213 uint32_t ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
214 uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
215 QM_SS_SPI_CTRL_TMOD_OFFS);
216 uint8_t bytes = BYTES_PER_FRAME(ctrl);
/* Stash the descriptor for the interrupt handlers. */
225 spi_async_transfer[spi] = xfer;
/* NDF is the number of RX data frames minus one (hardware convention). */
231 QM_SS_SPI_NDF_WRITE((xfer->
rx_len - 1), base[spi]);
/* RX threshold: the watermark, clamped to rx_len for short transfers. */
235 (((FIFO_RX_W_MARK < xfer->
rx_len ? FIFO_RX_W_MARK : xfer->
rx_len) -
237 tftlr = FIFO_TX_W_MARK;
240 QM_SS_SPI_RFTLR_WRITE(rftlr, base[spi]);
241 QM_SS_SPI_TFTLR_WRITE(tftlr, base[spi]);
/* Unmask every SPI interrupt source for the async state machine. */
244 QM_SS_SPI_INTERRUPT_MASK_WRITE(QM_SS_SPI_INTR_ALL, base[spi]);
/* Presumably the RX-only path: prime the bus with a dummy frame. */
251 fifo_write(spi, (uint8_t *)&dummy_frame, bytes);
/*
 * Fragments of qm_ss_spi_irq_transfer_terminate (signature and interior
 * lines lost in extraction). Computes how many frames had completed from
 * the direction-appropriate counter — presumably to report partial length
 * to the user callback; verify.
 */
259 QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
261 spi_async_transfer[spi];
269 ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
270 tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
271 QM_SS_SPI_CTRL_TMOD_OFFS);
/* TX-oriented mode: frames completed = requested minus still-pending. */
274 len = transfer->
tx_len - tx_c[spi];
/* RX-oriented mode: same computation on the RX side. */
276 len = transfer->
rx_len - rx_c[spi];
/*
 * Fragments of the error-interrupt handler (interior lines lost in
 * extraction). Reads the interrupt status and, per error bit (TX
 * overflow, RX underflow, RX overflow), invokes the user callback with
 * the partial frame count.
 */
290 static void handle_spi_err_interrupt(
const qm_ss_spi_t spi)
292 uint32_t intr_stat = QM_SS_SPI_INTERRUPT_STATUS_READ(base[spi]);
294 spi_async_transfer[spi];
/* Verbose builds report TXOI/RXUI through the callback... */
298 #if HAS_SS_SPI_VERBOSE_ERROR
299 if ((intr_stat & QM_SS_SPI_INTR_TXOI) && transfer->
callback) {
302 transfer->
tx_len - tx_c[spi]);
305 if ((intr_stat & QM_SS_SPI_INTR_RXUI) && transfer->
callback) {
308 transfer->
rx_len - rx_c[spi]);
/* ...non-verbose builds merely assert these errors never happen. */
311 QM_ASSERT((intr_stat & QM_SS_SPI_INTR_STAT_TXOI) == 0);
312 QM_ASSERT((intr_stat & QM_SS_SPI_INTR_STAT_RXUI) == 0);
/* RX overflow is reported in both build flavors. */
315 if ((intr_stat & QM_SS_SPI_INTR_RXOI) && transfer->
callback) {
318 transfer->
rx_len - rx_c[spi]);
/*
 * Fragments of the TX-threshold interrupt handler (interior lines lost in
 * extraction). Acknowledges TXEI, then refills the TX FIFO from the
 * active transfer's buffer as far as the free FIFO space allows.
 */
322 static void handle_spi_tx_interrupt(
const qm_ss_spi_t spi)
324 uint32_t ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
326 uint8_t bytes = BYTES_PER_FRAME(ctrl);
327 uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
328 QM_SS_SPI_CTRL_TMOD_OFFS);
333 spi_async_transfer[spi];
/* Acknowledge the TX-empty interrupt before refilling. */
336 QM_SS_SPI_INTERRUPT_CLEAR_WRITE(QM_SS_SPI_INTR_TXEI, base[spi]);
/* Resume point in the TX buffer: frames already sent, scaled to bytes. */
342 const uint8_t *tx_buffer =
343 transfer->
tx + ((transfer->
tx_len - tx_c[spi]) * bytes);
/* Nothing left to send and controller idle: transfer is complete. */
345 if (tx_c[spi] == 0 &&
346 !(__builtin_arc_lr(base[spi] +
QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY)) {
/* Mask TXEI so the empty FIFO stops re-raising the interrupt. */
355 QM_SS_SPI_INTERRUPT_MASK_NAND(QM_SS_SPI_INTR_TXEI,
/* Free FIFO slots; the -1 appears to reserve one entry — verify. */
363 cnt = QM_SS_SPI_FIFO_DEPTH - rxflr - txflr - 1;
364 while (tx_c[spi] && cnt > 0) {
365 fifo_write(spi, tx_buffer, bytes);
/*
 * Fragments of the RX-threshold interrupt handler (interior lines lost in
 * extraction). Acknowledges RXFI, drains the RX FIFO into the transfer
 * buffer, then lowers the RX threshold for the remaining frames.
 */
372 static void handle_spi_rx_interrupt(
const qm_ss_spi_t spi)
374 uint32_t ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
376 uint8_t bytes = BYTES_PER_FRAME(ctrl);
378 spi_async_transfer[spi];
379 uint32_t new_irq_level = 0;
/* Acknowledge the RX-FIFO-full interrupt before draining. */
382 QM_SS_SPI_INTERRUPT_CLEAR_WRITE(QM_SS_SPI_INTR_RXFI, base[spi]);
/* Resume point in the RX buffer: frames already read, scaled to bytes. */
390 transfer->
rx + ((transfer->
rx_len - rx_c[spi]) * bytes);
/* Drain while the RX FIFO reports not-empty (and, presumably, frames
 * remain — loop condition partially missing; verify). */
392 while (__builtin_arc_lr(base[spi] +
QM_SS_SPI_SR) & QM_SS_SPI_SR_RFNE &&
394 fifo_read(spi, rx_buffer, bytes);
/* Next threshold: the watermark, clamped to the frames still expected. */
400 (FIFO_RX_W_MARK < rx_c[spi] ? FIFO_RX_W_MARK : rx_c[spi]);
403 QM_SS_SPI_RFTLR_WRITE(new_irq_level, base[spi]);
/*
 * Fragments of qm_ss_spi_save_context and qm_ss_spi_restore_context
 * (bodies lost in extraction); both validate arguments and cache the
 * controller base address. Compiled only when context save/restore
 * support is enabled.
 */
438 #if (ENABLE_RESTORE_CONTEXT)
/* save_context prologue. */
442 const uint32_t controller = base[spi];
444 QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
445 QM_CHECK(ctx != NULL, -EINVAL);
/* restore_context prologue. */
457 const uint32_t controller = base[spi];
459 QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
460 QM_CHECK(ctx != NULL, -EINVAL);
qm_ss_spi_slave_select_t
SPI Slave select type.
uint32_t spi_timing
Timing Register.
int qm_ss_spi_irq_transfer_terminate(const qm_ss_spi_t spi)
Terminate SPI IRQ transfer.
int qm_ss_spi_irq_transfer(const qm_ss_spi_t spi, const qm_ss_spi_async_transfer_t *const xfer)
Initiate an interrupt based SPI transfer.
uint16_t tx_len
Number of data frames to write.
Sensor Subsystem SPI context type.
TX transfer has overflowed.
uint16_t tx_len
Number of data frames to write.
SPI asynchronous transfer type.
int qm_ss_spi_set_config(const qm_ss_spi_t spi, const qm_ss_spi_config_t *const cfg)
Set SPI configuration.
int qm_ss_spi_get_status(const qm_ss_spi_t spi, qm_ss_spi_status_t *const status)
Get SPI bus status.
int qm_ss_spi_slave_select(const qm_ss_spi_t spi, const qm_ss_spi_slave_select_t ss)
Set Slave Select lines.
int qm_ss_spi_transfer(const qm_ss_spi_t spi, const qm_ss_spi_transfer_t *const xfer, qm_ss_spi_status_t *const status)
Perform a blocking SPI transfer.
RX transfer has underflowed.
void(* callback)(void *data, int error, qm_ss_spi_status_t status, uint16_t len)
Transfer callback.
qm_ss_spi_tmode_t transfer_mode
Transfer mode (enum)
SPI serial clock divider value.
Number of valid data entries in TX FIFO.
SPI device is not in use.
int qm_ss_spi_save_context(const qm_ss_spi_t spi, qm_ss_spi_context_t *const ctx)
Save SS SPI context.
qm_ss_spi_frame_size_t frame_size
Frame Size.
uint16_t clk_divider
SCK = SPI_clock/clk_divider.
int qm_ss_spi_restore_context(const qm_ss_spi_t spi, const qm_ss_spi_context_t *const ctx)
Restore SS SPI context.
uint16_t rx_len
Number of data frames to read.
uint32_t spi_ctrl
Control Register.
uint32_t spi_spien
SPI Enable Register.
qm_ss_spi_t
Sensor Subsystem SPI modules.
void * callback_data
Callback user data.
Number of valid data entries in RX FIFO.
QM_ISR_DECLARE(qm_ss_spi_0_error_isr)
ISR for SPI 0 error interrupt.
qm_ss_spi_status_t
SPI status.
SPI synchronous transfer type.
RX transfer has overflowed.
uint16_t rx_len
Number of data frames to read.
qm_ss_spi_bmode_t bus_mode
Bus mode (enum)