/* Default FIFO interrupt thresholds and FIFO depth (in frames). */
#define SPI_MST_DEFAULT_TX_THRESHOLD (0x05)
#define SPI_MST_DEFAULT_RX_THRESHOLD (0x05)
#define SPI_SLV_DEFAULT_TX_THRESHOLD (0x04)
#define SPI_SLV_DEFAULT_RX_THRESHOLD (0x03)
#define SPI_FIFOS_DEPTH (8)

/* DMA transmit data level and write burst length. */
#define SPI_DMATDLR_DMATDL (0x4)
#define SPI_DMA_WRITE_BURST_LENGTH QM_DMA_BURST_TRANS_LENGTH_4

/* DMA receive data level and read burst length. */
#define SPI_DMARDLR_DMARDL (0x03)
#define SPI_DMA_READ_BURST_LENGTH QM_DMA_BURST_TRANS_LENGTH_4
/* Per-direction DMA channel context. */
typedef struct {
	qm_spi_t spi_id;		    /* Controller the channel serves. */
	qm_dma_channel_id_t dma_channel_id; /* DMA channel bound to the SPI. */
	volatile bool cb_pending;	    /* DMA callback not yet received. */
} dma_context_t;
/* Frames transferred so far on each controller. */
static volatile uint16_t tx_counter[QM_SPI_NUM];
static volatile uint16_t rx_counter[QM_SPI_NUM];
/* Data frame size in bytes (1, 2 or 4). */
static uint8_t dfs[QM_SPI_NUM];
/* Dummy frame written to start the clock in RX-only transfers. */
static const uint32_t tx_dummy_frame = 0;
/* Configured transfer mode, frame format and active asynchronous transfer. */
static qm_spi_tmode_t tmode[QM_SPI_NUM];
static qm_spi_frame_format_t frf[QM_SPI_NUM];
static const volatile qm_spi_async_transfer_t *spi_async_transfer[QM_SPI_NUM];
/* DMA state: one context per direction, plus the DMA core used. */
static dma_context_t dma_context_tx[QM_SPI_NUM];
static dma_context_t dma_context_rx[QM_SPI_NUM];
static qm_dma_t dma_core[QM_SPI_NUM];
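/*
 * FIFO access helpers: read or write a single data frame using the frame
 * size (dfs) configured for the controller.
 */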
static void read_frame(const qm_spi_t spi, uint8_t *const rx_buffer)
{
	qm_spi_reg_t *const controller = QM_SPI[spi];
	const uint8_t frame_size = dfs[spi];

	if (frame_size == 1) {
		*(uint8_t *)rx_buffer = controller->dr[0];
	} else if (frame_size == 2) {
		*(uint16_t *)rx_buffer = controller->dr[0];
	} else {
		*(uint32_t *)rx_buffer = controller->dr[0];
	}
}
static void write_frame(const qm_spi_t spi, const uint8_t *const tx_buffer)
{
	qm_spi_reg_t *const controller = QM_SPI[spi];
	const uint8_t frame_size = dfs[spi];

	if (frame_size == 1) {
		controller->dr[0] = *(uint8_t *)tx_buffer;
	} else if (frame_size == 2) {
		controller->dr[0] = *(uint16_t *)tx_buffer;
	} else {
		controller->dr[0] = *(uint32_t *)tx_buffer;
	}
}
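/*
 * Block until the controller has finished: the TX FIFO must be empty and the
 * busy flag cleared.
 */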
static void wait_for_controller(const qm_spi_reg_t *const controller)
{
	while (!(controller->sr & QM_SPI_SR_TFE))
		;
	while (controller->sr & QM_SPI_SR_BUSY)
		;
}
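/*
 * Master RX interrupt: drain the RX FIFO into the user buffer and retune the
 * RX threshold for the frames still expected.
 */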
static __inline__ void handle_mst_rx_interrupt(const qm_spi_t spi)
{
	qm_spi_reg_t *const controller = QM_SPI[spi];
	const volatile qm_spi_async_transfer_t *const transfer =
	    spi_async_transfer[spi];

	/* Continue from the current position in the RX buffer. */
	uint8_t *rx_buffer = transfer->rx + (rx_counter[spi] * dfs[spi]);

	while (controller->rxflr) {
		read_frame(spi, rx_buffer);
		rx_buffer += dfs[spi];
		rx_counter[spi]++;
		/* All requested frames received: mask all interrupts in
		 * RX-only mode, otherwise just the RX interrupts. */
		if (transfer->rx_len == rx_counter[spi]) {
			if (tmode[spi] == QM_SPI_TMOD_RX) {
				controller->imr = QM_SPI_IMR_MASK_ALL;
			} else {
				controller->imr &=
				    ~(QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM |
				      QM_SPI_IMR_RXFIM);
			}
			return;
		}
	}
	/* Lower the RX threshold when fewer frames than the current threshold
	 * remain, so the tail of the transfer still raises an interrupt. */
	const uint32_t frames_left = transfer->rx_len - rx_counter[spi];
	if (frames_left <= controller->rxftlr) {
		controller->rxftlr = frames_left - 1;
	}
}
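/*
 * Master TX interrupt: refill the TX FIFO from the user buffer, leaving one
 * entry spare so the RX FIFO cannot overflow.
 */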
static __inline__ void handle_mst_tx_interrupt(const qm_spi_t spi)
{
	qm_spi_reg_t *const controller = QM_SPI[spi];
	const volatile qm_spi_async_transfer_t *const transfer =
	    spi_async_transfer[spi];

	/* Continue from the current position in the TX buffer. */
	const uint8_t *tx_buffer = transfer->tx + (tx_counter[spi] * dfs[spi]);

	int entries_free = SPI_FIFOS_DEPTH - controller->txflr -
			   controller->rxflr - 1;

	while (entries_free > 0 && tx_counter[spi] < transfer->tx_len) {
		write_frame(spi, tx_buffer);
		tx_buffer += dfs[spi];
		tx_counter[spi]++;
		entries_free--;
	}

	/* Nothing left to queue: mask the TX interrupts. */
	if (transfer->tx_len == tx_counter[spi]) {
		controller->imr &= ~(QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM);
	}
}
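/*
 * Slave RX interrupt: store incoming frames. The slave does not control when
 * the master clocks data in, so once the buffer is full the counter may wrap
 * and reception continues from the start of the buffer.
 */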
static __inline__ void handle_slv_rx_interrupt(const qm_spi_t spi)
{
	qm_spi_reg_t *const controller = QM_SPI[spi];
	const volatile qm_spi_async_transfer_t *const transfer =
	    spi_async_transfer[spi];

	uint8_t *rx_buffer = transfer->rx + (rx_counter[spi] * dfs[spi]);
	uint16_t len = transfer->rx_len;
	int32_t frames_left = 0;

	while (controller->rxflr) {
		/* Only store frames the caller asked for. */
		if (rx_counter[spi] < len) {
			read_frame(spi, rx_buffer);
			rx_buffer += dfs[spi];
			rx_counter[spi]++;
		}
		/* Buffer full: wrap the counter if the transfer is kept
		 * alive; otherwise stop listening for RX interrupts. */
		if (rx_counter[spi] == len) {
			if (transfer->keep_enabled) {
				rx_counter[spi] = 0;
			} else {
				controller->imr &=
				    ~(QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM |
				      QM_SPI_IMR_RXFIM);
				break;
			}
		}

		/* Counter wrapped: restart at the beginning of the buffer. */
		if (!rx_counter[spi]) {
			rx_buffer = transfer->rx;
		}
	}
	/* Lower the RX threshold when fewer frames than the current threshold
	 * remain, so the tail of the transfer still raises an interrupt. */
	frames_left = len - rx_counter[spi];
	if (frames_left > 0 && (uint32_t)frames_left <= controller->rxftlr) {
		controller->rxftlr = frames_left - 1;
	}
}
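/*
 * Slave TX interrupt: keep the TX FIFO topped up so the master always has a
 * frame to clock out, leaving one FIFO entry spare.
 */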
static __inline__ void handle_slv_tx_interrupt(const qm_spi_t spi)
{
	qm_spi_reg_t *const controller = QM_SPI[spi];
	const volatile qm_spi_async_transfer_t *const transfer =
	    spi_async_transfer[spi];
	const uint8_t *tx_buffer = transfer->tx + (tx_counter[spi] * dfs[spi]);
	uint16_t len = transfer->tx_len;

	int entries_free = SPI_FIFOS_DEPTH - controller->txflr -
			   controller->rxflr - 1;

	while (entries_free > 0 && tx_counter[spi] < len) {
		write_frame(spi, tx_buffer);
		tx_buffer += dfs[spi];
		tx_counter[spi]++;
		entries_free--;
		/* All frames consumed: wrap the counter if the transfer is to
		 * be kept alive, so transmission can continue. */
		if (tx_counter[spi] == len) {
			if (transfer->keep_enabled) {
				tx_counter[spi] = 0;
			}
		}

		/* Counter wrapped: restart at the beginning of the buffer. */
		if (!tx_counter[spi]) {
			tx_buffer = transfer->tx;
		}
	}

	/* Nothing left to send: mask the TX interrupts. */
	if (tx_counter[spi] >= len) {
		controller->imr &= ~(QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM);
	}
}
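/*
 * RX FIFO overflow: the transfer cannot be completed reliably; report the
 * error to the user and mask the controller interrupts.
 */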
static void handle_spi_overflow_interrupt(const qm_spi_t spi)
{
	qm_spi_reg_t *const controller = QM_SPI[spi];
	const volatile qm_spi_async_transfer_t *const transfer =
	    spi_async_transfer[spi];

	if (transfer->callback) {
		transfer->callback(transfer->callback_data, -EIO,
				   QM_SPI_RX_OVERFLOW, rx_counter[spi]);
	}

	controller->imr = QM_SPI_IMR_MASK_ALL;
}
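/*
 * Top-level master interrupt handler: dispatch overflow, RX and TX service
 * routines and detect the end of the transfer.
 */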
static void handle_spi_mst_interrupt(const qm_spi_t spi)
{
	qm_spi_reg_t *const controller = QM_SPI[spi];
	const volatile qm_spi_async_transfer_t *const transfer =
	    spi_async_transfer[spi];
	const uint32_t int_status = controller->isr;

	/* TX overflow and RX underflow are never expected. */
	QM_ASSERT((int_status & (QM_SPI_ISR_TXOIS | QM_SPI_ISR_RXUIS)) == 0);

	if (int_status & QM_SPI_ISR_RXOIS) {
		handle_spi_overflow_interrupt(spi);
		return;
	}

	if (int_status & QM_SPI_ISR_RXFIS) {
		handle_mst_rx_interrupt(spi);
	}

	/* Transfer complete: both counters reached their targets, the TX FIFO
	 * has drained and the controller is idle. */
	if (transfer->rx_len == rx_counter[spi] &&
	    transfer->tx_len == tx_counter[spi] &&
	    (controller->sr & QM_SPI_SR_TFE) &&
	    !(controller->sr & QM_SPI_SR_BUSY)) {
		controller->imr = QM_SPI_IMR_MASK_ALL;
	}

	if (int_status & QM_SPI_ISR_TXEIS &&
	    transfer->tx_len > tx_counter[spi]) {
		handle_mst_tx_interrupt(spi);
	}
}
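/*
 * Top-level slave interrupt handler: same dispatching as the master handler.
 */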
static void handle_spi_slv_interrupt(const qm_spi_t spi)
{
	qm_spi_reg_t *const controller = QM_SPI[spi];
	const volatile qm_spi_async_transfer_t *const transfer =
	    spi_async_transfer[spi];
	const uint32_t int_status = controller->isr;

	/* TX overflow and RX underflow are never expected. */
	QM_ASSERT((int_status & (QM_SPI_ISR_TXOIS | QM_SPI_ISR_RXUIS)) == 0);

	if (int_status & QM_SPI_ISR_RXOIS) {
		handle_spi_overflow_interrupt(spi);
		return;
	}

	if (int_status & QM_SPI_ISR_RXFIS) {
		handle_slv_rx_interrupt(spi);
	}
	/* Transfer complete: both counters reached their targets, the TX FIFO
	 * has drained and the controller is idle (a slave in RX-only mode may
	 * still report busy while the master drives the clock). */
	if (transfer->rx_len == rx_counter[spi] &&
	    transfer->tx_len == tx_counter[spi] &&
	    (controller->sr & QM_SPI_SR_TFE) &&
	    (!(controller->sr & QM_SPI_SR_BUSY) ||
	     (tmode[spi] == QM_SPI_TMOD_RX))) {
		controller->imr = QM_SPI_IMR_MASK_ALL;
	}
	if (int_status & QM_SPI_ISR_TXEIS) {
		handle_slv_tx_interrupt(spi);
	}
}
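/*
 * Configure the controller: frame size, transfer mode and bus mode, plus the
 * baud rate divider for master instances. The controller must be disabled
 * while it is reconfigured.
 */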
int qm_spi_set_config(const qm_spi_t spi, const qm_spi_config_t *cfg)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(cfg, -EINVAL);
	/* Further parameter restrictions apply when QM_SPI_SLV_0 == spi. */

	if (0 != QM_SPI[spi]->ssienr) {
		return -EBUSY;
	}

	qm_spi_reg_t *const controller = QM_SPI[spi];

	controller->ctrlr0 =
	    (cfg->frame_size << QM_SPI_CTRLR0_DFS_32_OFFSET) |
	    (cfg->transfer_mode << QM_SPI_CTRLR0_TMOD_OFFSET) |
	    (cfg->bus_mode << QM_SPI_CTRLR0_SCPOL_SCPH_OFFSET);

	/* Only master instances drive the serial clock. */
	if (QM_SPI_SLV_0 != spi) {
		controller->baudr = cfg->clk_divider;
	}

	/* Derive the frame size in bytes from the register encoding. */
	dfs[spi] = (cfg->frame_size / 8) + 1;

	tmode[spi] = cfg->transfer_mode;
	frf[spi] = cfg->frame_format;

	return 0;
}
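/*
 * Select which slave(s) a master instance will address; the slave select
 * lines cannot change while a transfer is in progress.
 */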
int qm_spi_slave_select(const qm_spi_t spi, const qm_spi_slave_select_t ss)
{
	QM_CHECK((spi < QM_SPI_NUM) && (spi != QM_SPI_SLV_0), -EINVAL);

	if (QM_SPI[spi]->sr & QM_SPI_SR_BUSY) {
		return -EBUSY;
	}

	QM_SPI[spi]->ser = ss;

	return 0;
}
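/* Report whether the controller is idle, busy, or its RX FIFO overflowed. */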
int qm_spi_get_status(const qm_spi_t spi, qm_spi_status_t *const status)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(status, -EINVAL);

	const qm_spi_reg_t *const controller = QM_SPI[spi];

	if (controller->sr & QM_SPI_SR_BUSY) {
		*status = QM_SPI_BUSY;
	} else {
		*status = QM_SPI_IDLE;
	}

	if (controller->risr & QM_SPI_RISR_RXOIR) {
		*status = QM_SPI_RX_OVERFLOW;
	}

	return 0;
}
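/*
 * Blocking, polled multi-frame transfer. A minimal usage sketch (tx_buf and
 * rx_buf are illustrative caller-owned buffers; lengths are in frames, so
 * this assumes an 8-bit frame size):
 *
 *	qm_spi_transfer_t xfer = {0};
 *	qm_spi_status_t status;
 *	xfer.tx = tx_buf;
 *	xfer.tx_len = sizeof(tx_buf);
 *	xfer.rx = rx_buf;
 *	xfer.rx_len = sizeof(rx_buf);
 *	qm_spi_transfer(QM_SPI_MST_0, &xfer, &status);
 */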
int qm_spi_transfer(const qm_spi_t spi, const qm_spi_transfer_t *const xfer,
		    qm_spi_status_t *const status)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);

	qm_spi_reg_t *const controller = QM_SPI[spi];

	uint32_t i_tx = xfer->tx_len;
	uint32_t i_rx = xfer->rx_len;

	/* Let any previous transfer drain before reprogramming. */
	wait_for_controller(controller);

	/* This is a polled transfer: mask all interrupts. */
	controller->imr = QM_SPI_IMR_MASK_ALL;

	/* Enable the controller. */
	controller->ssienr = QM_SPI_SSIENR_SSIENR;

	uint8_t *rx_buffer = xfer->rx;
	const uint8_t *tx_buffer = xfer->tx;

	/* In RX-only mode, one dummy frame is written to start the clock. */
	if (tmode[spi] == QM_SPI_TMOD_RX) {
		tx_buffer = (uint8_t *)&tx_dummy_frame;
		i_tx = 1;
	}
	while (i_tx || i_rx) {
		/* An RX FIFO overflow aborts the transfer. */
		if (controller->risr & QM_SPI_RISR_RXOIR) {
			if (status) {
				*status = QM_SPI_RX_OVERFLOW;
			}
			controller->ssienr = 0;
			return -EIO;
		}
		if (i_rx && (controller->sr & QM_SPI_SR_RFNE)) {
			read_frame(spi, rx_buffer);
			rx_buffer += dfs[spi];
			i_rx--;
		}

		if (i_tx && (controller->sr & QM_SPI_SR_TFNF)) {
			write_frame(spi, tx_buffer);
			tx_buffer += dfs[spi];
			i_tx--;
		}
	}

	wait_for_controller(controller);

	/* Disable the controller now that the bus is idle. */
	controller->ssienr = 0;

	return 0;
}
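/*
 * Update the buffers and interrupt mask of an interrupt-based transfer
 * without reconfiguring or disabling the controller.
 */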
int qm_spi_irq_update(const qm_spi_t spi,
		      const volatile qm_spi_async_transfer_t *const xfer,
		      const qm_spi_update_t update)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);
	/* An additional restriction applies when spi == QM_SPI_SLV_0. */
	QM_CHECK(update == QM_SPI_UPDATE_TX || update == QM_SPI_UPDATE_RX ||
		     update == (QM_SPI_UPDATE_TX | QM_SPI_UPDATE_RX),
		 -EINVAL);
	QM_CHECK((update & QM_SPI_UPDATE_TX) ? (tmode[spi] != QM_SPI_TMOD_RX)
					     : 1,
		 -EINVAL);
	QM_CHECK((update & QM_SPI_UPDATE_RX) ? (tmode[spi] != QM_SPI_TMOD_TX)
					     : 1,
		 -EINVAL);

	qm_spi_reg_t *const controller = QM_SPI[spi];

	spi_async_transfer[spi] = xfer;
	/* Unmask only the interrupts needed for the updated direction(s) and
	 * restart the corresponding frame counters. */
	if (update == QM_SPI_UPDATE_RX) {
		rx_counter[spi] = 0;
		controller->imr =
		    QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM | QM_SPI_IMR_RXFIM;
	} else if (update == QM_SPI_UPDATE_TX) {
		tx_counter[spi] = 0;
		controller->imr = QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM;
	} else {
		rx_counter[spi] = 0;
		tx_counter[spi] = 0;
		controller->imr = QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM |
				  QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM |
				  QM_SPI_IMR_RXFIM;
	}

	return 0;
}
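/*
 * Start an interrupt-based transfer: program the FIFO thresholds for the
 * instance, enable the controller and unmask the interrupts required by the
 * configured transfer mode. Completion is reported via the callback.
 */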
int qm_spi_irq_transfer(const qm_spi_t spi,
			const volatile qm_spi_async_transfer_t *const xfer)
{
	qm_spi_update_t update = 0;
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);

	qm_spi_reg_t *const controller = QM_SPI[spi];

	/* Build the update mask from the configured transfer mode. */
	if (tmode[spi] != QM_SPI_TMOD_TX) {
		update |= QM_SPI_UPDATE_RX;
	}
	if (tmode[spi] != QM_SPI_TMOD_RX) {
		update |= QM_SPI_UPDATE_TX;
	}

	/* FIFO thresholds: masters and the slave use different defaults, and
	 * short RX transfers lower the threshold so they still interrupt. */
	if (QM_SPI_SLV_0 != spi) {
		controller->rxftlr =
		    (xfer->rx_len < SPI_MST_DEFAULT_RX_THRESHOLD)
			? (xfer->rx_len - 1)
			: SPI_MST_DEFAULT_RX_THRESHOLD;
		controller->txftlr = SPI_MST_DEFAULT_TX_THRESHOLD;
	} else {
		controller->rxftlr =
		    (xfer->rx_len < SPI_SLV_DEFAULT_RX_THRESHOLD)
			? (xfer->rx_len - 1)
			: SPI_SLV_DEFAULT_RX_THRESHOLD;
		controller->txftlr = SPI_SLV_DEFAULT_TX_THRESHOLD;

		/* Drive the slave output only when there is data to send
		 * (setting SLV_OE disables the slave TX line). */
		if (xfer->tx_len) {
			controller->ctrlr0 &= ~QM_SPI_CTRLR0_SLV_OE;
		} else {
			controller->ctrlr0 |= QM_SPI_CTRLR0_SLV_OE;
		}
	}

	controller->ssienr = QM_SPI_SSIENR_SSIENR;

	/* In master RX-only mode, one dummy frame starts the clock. */
	if ((QM_SPI_SLV_0 != spi) && (tmode[spi] == QM_SPI_TMOD_RX)) {
		write_frame(spi, (uint8_t *)&tx_dummy_frame);
	}

	return qm_spi_irq_update(spi, xfer, update);
}
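/*
 * ISRs: dispatch to the interrupt handler of the instance and signal
 * end-of-interrupt to the interrupt controller.
 */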
QM_ISR_DECLARE(qm_spi_master_0_isr)
{
	handle_spi_mst_interrupt(QM_SPI_MST_0);
	QM_ISR_EOI(QM_IRQ_SPI_MASTER_0_INT_VECTOR);
}

QM_ISR_DECLARE(qm_spi_master_1_isr)
{
	handle_spi_mst_interrupt(QM_SPI_MST_1);
	QM_ISR_EOI(QM_IRQ_SPI_MASTER_1_INT_VECTOR);
}

QM_ISR_DECLARE(qm_spi_slave_0_isr)
{
	handle_spi_slv_interrupt(QM_SPI_SLV_0);
	QM_ISR_EOI(QM_IRQ_SPI_SLAVE_0_INT_VECTOR);
}
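/*
 * Abort an interrupt-based transfer: mask the interrupts, report how many
 * frames actually completed and reset the driver state.
 */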
int qm_spi_irq_transfer_terminate(const qm_spi_t spi)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);

	qm_spi_reg_t *const controller = QM_SPI[spi];
	const volatile qm_spi_async_transfer_t *const transfer =
	    spi_async_transfer[spi];
	uint16_t len = 0;

	controller->imr = QM_SPI_IMR_MASK_ALL;

	/* Frames still sitting in the TX FIFO were queued but never sent. */
	uint16_t tx_fifo_frames = controller->txflr;
	if (tmode[spi] != QM_SPI_TMOD_RX) {
		if (tx_counter[spi] > tx_fifo_frames) {
			len = tx_counter[spi] - tx_fifo_frames;
		}
	} else {
		len = rx_counter[spi];
	}

	if (transfer->callback) {
		transfer->callback(transfer->callback_data, -ECANCELED,
				   QM_SPI_IDLE, len);
	}

	tx_counter[spi] = 0;
	rx_counter[spi] = 0;

	return 0;
}
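/*
 * Common DMA completion callback for the TX and RX channels. A transfer that
 * uses both channels is only finished once both have called back.
 */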
static void spi_dma_callback(void *callback_context, uint32_t len,
			     int error_code)
{
	QM_ASSERT(callback_context);

	int client_error = 0;
	uint32_t frames_expected;
	volatile bool *cb_pending_alternate_p;

	/* The DMA driver hands back the context registered at channel
	 * configuration time. */
	dma_context_t *const dma_context_p = callback_context;
	const qm_spi_t spi = dma_context_p->spi_id;
	QM_ASSERT(spi < QM_SPI_NUM);

	qm_spi_reg_t *const controller = QM_SPI[spi];
	const volatile qm_spi_async_transfer_t *const transfer =
	    spi_async_transfer[spi];

	const uint8_t frame_size = dfs[spi];
	QM_ASSERT((frame_size == 1) || (frame_size == 2) || (frame_size == 4));

	/* The DMA driver reports the length in bytes; convert to frames. */
	const uint32_t frames_transfered = len / frame_size;

	QM_ASSERT((dma_context_p == &dma_context_tx[spi]) ||
		  (dma_context_p == &dma_context_rx[spi]));

	if (dma_context_p == &dma_context_tx[spi]) {
		/* TX channel callback. */
		frames_expected = transfer->tx_len;
		cb_pending_alternate_p = &dma_context_rx[spi].cb_pending;
	} else {
		/* RX channel callback. */
		frames_expected = transfer->rx_len;
		cb_pending_alternate_p = &dma_context_tx[spi].cb_pending;
	}

	QM_ASSERT(cb_pending_alternate_p);
	QM_ASSERT(dma_context_p->cb_pending);
	dma_context_p->cb_pending = false;

	if (error_code) {
		/* A DMA error overrides any other outcome. */
		client_error = error_code;
	} else if (false == *cb_pending_alternate_p) {
		/* The other channel has already finished, so the transfer is
		 * complete: wait for the controller to drain and check that
		 * all expected frames went through. */
		wait_for_controller(controller);

		if (frames_transfered != frames_expected) {
			QM_ASSERT(frames_transfered < frames_expected);
			client_error = -ECANCELED;
		}
	}

	/* Once the transfer has ended (both channels done, or an error),
	 * disable DMA requests and report the result to the user. */
	if (client_error || (false == *cb_pending_alternate_p)) {
		controller->dmacr = 0;

		if (transfer->callback) {
			transfer->callback(transfer->callback_data,
					   client_error, QM_SPI_IDLE,
					   frames_transfered);
		}
	}
}
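/*
 * Bind a DMA channel to one direction of an SPI controller and register the
 * driver's DMA callback on it.
 */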
int qm_spi_dma_channel_config(
    const qm_spi_t spi, const qm_dma_t dma_ctrl_id,
    const qm_dma_channel_id_t dma_channel_id,
    const qm_dma_channel_direction_t dma_channel_direction)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);

	dma_context_t *dma_context_p = NULL;
	qm_dma_channel_config_t dma_chan_cfg = {0};
	dma_chan_cfg.channel_direction = dma_channel_direction;

	switch (dma_channel_direction) {
	case QM_DMA_MEMORY_TO_PERIPHERAL:
		/* TX: memory to the SPI transmit FIFO. */
		dma_chan_cfg.destination_burst_length =
		    SPI_DMA_WRITE_BURST_LENGTH;
		dma_context_p = &dma_context_tx[spi];
		break;
	case QM_DMA_PERIPHERAL_TO_MEMORY:
		/* RX: the SPI receive FIFO to memory. */
		dma_chan_cfg.source_burst_length = SPI_DMA_READ_BURST_LENGTH;
		dma_context_p = &dma_context_rx[spi];
		break;
	default:
		return -EINVAL;
	}

	QM_ASSERT(dma_context_p);

	dma_chan_cfg.client_callback = spi_dma_callback;
	dma_chan_cfg.callback_context = dma_context_p;
	if (0 != qm_dma_channel_set_config(dma_ctrl_id, dma_channel_id,
					   &dma_chan_cfg)) {
		return -EINVAL;
	}

	/* Remember which DMA controller and channel serve this direction. */
	dma_context_p->spi_id = spi;
	dma_context_p->dma_channel_id = dma_channel_id;
	dma_core[spi] = dma_ctrl_id;

	return 0;
}
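/*
 * Start a DMA-based transfer. The channels used by the transfer must first
 * have been bound with qm_spi_dma_channel_config().
 */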
int qm_spi_dma_transfer(const qm_spi_t spi,
			const qm_spi_async_transfer_t *const xfer)
{
	int ret;
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);
	QM_CHECK(dma_core[spi] < QM_DMA_NUM, -EINVAL);

	qm_spi_reg_t *const controller = QM_SPI[spi];

	/* The controller must not be running while the transfer is set up. */
	if (0 != controller->ssienr) {
		return -EBUSY;
	}

	/* DMA transfers do not use the SPI interrupts. */
	controller->imr = QM_SPI_IMR_MASK_ALL;

	/* Configure the RX channel: SPI data register to the user buffer. */
	if (tmode[spi] != QM_SPI_TMOD_TX) {
		qm_dma_transfer_t dma_trans = {0};
		dma_trans.block_size = xfer->rx_len;
		dma_trans.source_address = (uint32_t *)&controller->dr[0];
		dma_trans.destination_address = (uint32_t *)xfer->rx;
		ret = qm_dma_transfer_set_config(
		    dma_core[spi], dma_context_rx[spi].dma_channel_id,
		    &dma_trans);
		if (0 != ret) {
			return ret;
		}
	}

	/* Configure the TX channel: the user buffer to the SPI data
	 * register. */
	if (tmode[spi] != QM_SPI_TMOD_RX) {
		qm_dma_transfer_t dma_trans = {0};
		dma_trans.block_size = xfer->tx_len;
		dma_trans.source_address = (uint32_t *)xfer->tx;
		dma_trans.destination_address = (uint32_t *)&controller->dr[0];
		ret = qm_dma_transfer_set_config(
		    dma_core[spi], dma_context_tx[spi].dma_channel_id,
		    &dma_trans);
		if (0 != ret) {
			return ret;
		}
	}
	spi_async_transfer[spi] = xfer;

	controller->ssienr = QM_SPI_SSIENR_SSIENR;
	/* Start the RX side first so no incoming frame is missed. */
	if (tmode[spi] != QM_SPI_TMOD_TX) {
		controller->dmacr |= QM_SPI_DMACR_RDMAE;
		controller->dmardlr = SPI_DMARDLR_DMARDL;

		dma_context_rx[spi].cb_pending = true;
		ret = qm_dma_transfer_start(
		    dma_core[spi], dma_context_rx[spi].dma_channel_id);
		if (0 != ret) {
			dma_context_rx[spi].cb_pending = false;
			controller->dmacr = 0;
			return ret;
		}

		/* In RX-only mode, a dummy frame starts the clock. */
		if (tmode[spi] == QM_SPI_TMOD_RX) {
			write_frame(spi, (uint8_t *)&tx_dummy_frame);
		}
	}

	if (tmode[spi] != QM_SPI_TMOD_RX) {
		controller->dmacr |= QM_SPI_DMACR_TDMAE;
		controller->dmatdlr = SPI_DMATDLR_DMATDL;

		dma_context_tx[spi].cb_pending = true;
		ret = qm_dma_transfer_start(
		    dma_core[spi], dma_context_tx[spi].dma_channel_id);
		if (0 != ret) {
			dma_context_tx[spi].cb_pending = false;
			controller->dmacr = 0;
			return ret;
		}
	}

	return 0;
}
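/* Stop any DMA channel still active on behalf of this controller. */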
int qm_spi_dma_transfer_terminate(qm_spi_t spi)
{
	int ret = 0;
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(dma_context_tx[spi].cb_pending
		     ? (dma_core[spi] < QM_DMA_NUM)
		     : 1,
		 -EINVAL);
	QM_CHECK(dma_context_rx[spi].cb_pending
		     ? (dma_core[spi] < QM_DMA_NUM)
		     : 1,
		 -EINVAL);

	/* Terminate whichever channels are still waiting for a callback. */
	if (dma_context_tx[spi].cb_pending) {
		if (0 != qm_dma_transfer_terminate(
			     dma_core[spi],
			     dma_context_tx[spi].dma_channel_id)) {
			ret = -EIO;
		}
	}

	if (dma_context_rx[spi].cb_pending) {
		if (0 != qm_dma_transfer_terminate(
			     dma_core[spi],
			     dma_context_rx[spi].dma_channel_id)) {
			ret = -EIO;
		}
	}

	return ret;
}
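/*
 * Sleep support: save and restore the controller configuration registers
 * around a power state change.
 */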
#if (ENABLE_RESTORE_CONTEXT)
int qm_spi_save_context(const qm_spi_t spi, qm_spi_context_t *const ctx)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	const qm_spi_reg_t *const controller = QM_SPI[spi];

	ctx->ctrlr0 = controller->ctrlr0;
	ctx->ser = controller->ser;
	ctx->baudr = controller->baudr;

	return 0;
}

int qm_spi_restore_context(const qm_spi_t spi,
			   const qm_spi_context_t *const ctx)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	qm_spi_reg_t *const controller = QM_SPI[spi];

	controller->ctrlr0 = ctx->ctrlr0;
	controller->ser = ctx->ser;
	controller->baudr = ctx->baudr;

	return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */