Intel® Quark™ Microcontroller Software Interface  1.4.0
Intel® Quark™ Microcontroller BSP
qm_spi.c
1 /*
2  * {% copyright %}
3  */
4 
5 #include "qm_spi.h"
6 
7 /* SPI FIFO size defaults */
8 #define SPI_MST_DEFAULT_TX_THRESHOLD (0x05)
9 #define SPI_MST_DEFAULT_RX_THRESHOLD (0x05)
10 #define SPI_SLV_DEFAULT_TX_THRESHOLD (0x04)
11 #define SPI_SLV_DEFAULT_RX_THRESHOLD (0x03)
12 #define SPI_FIFOS_DEPTH (8)
13 
14 /* SPI DMA transmit watermark level. When the number of valid data entries in
15  * the transmit FIFO is equal to or below this field value, dma_tx_req is
16  * generated. The destination burst length has to fit in the remaining space
17  * of the transmit FIFO, thus it must be <= (SPI_FIFOS_DEPTH - TDLR).
18  * For optimal results it must be set to that delta so we can ensure the number
19  * of DMA transactions (bursts) needed is minimal, leading to better bus
20  * utilization.
21  *
22  * With that in mind, here we choose 4 frames as a watermark level (TDLR) so we
23  * can end up with a valid value for SPI_DMA_WRITE_BURST_LENGTH of 4 frames,
24  * still adhering to the above (FIFOS_DEPTH - TDLR = 4).
25  */
26 #define SPI_DMATDLR_DMATDL (0x4)
27 #define SPI_DMA_WRITE_BURST_LENGTH QM_DMA_BURST_TRANS_LENGTH_4
28 
29 /* SPI DMA receive watermark level. When the number of valid data entries in the
30  * receive FIFO is equal to or above this field value + 1, dma_rx_req is
31  * generated. The burst length has to match the watermark level so that the
32  * exact number of data entries fit one burst, and therefore only some values
33  * are allowed:
34  *   DMARDL        DMA read burst length
35  *   0             1
36  *   3             4
37  *   7 (highest)   8
38  *
39  * By keeping SPI_DMA_READ_BURST_LENGTH = RDLR + 1, we have optimal results
40  * since it reduces the number of DMA transactions, leading to a better bus
41  * utilization.
42  *
43  * Note that, unlike what we do for IRQ transfers, there is no need to
44  * adjust the watermark level (RDLR for DMA transfers, RXFTLR for IRQ ones)
45  * during or at the start of the DMA transaction if rx_len < RDLR. This is
46  * done automatically by the SPI DMA interface when it decides between
47  * burst or single transactions by means of the BLOCK_TS and SRC_MSIZE
48  * ratio.
49  */
50 #define SPI_DMARDLR_DMARDL (0x03)
51 #define SPI_DMA_READ_BURST_LENGTH QM_DMA_BURST_TRANS_LENGTH_4
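As a sanity check of the arithmetic described in the two comments above, the watermark/burst relationships can be written as compile-time assertions. This is an illustrative sketch, not part of the original driver, and assumes a C11 toolchain providing _Static_assert:

/* TX: a 4-frame write burst must fit in the FIFO space above DMATDLR. */
_Static_assert(SPI_FIFOS_DEPTH - SPI_DMATDLR_DMATDL == 4,
	       "TX DMA burst of 4 frames must fit above the TX watermark");
/* RX: a 4-frame read burst must equal the RX watermark level plus one. */
_Static_assert(SPI_DMARDLR_DMARDL + 1 == 4,
	       "RX DMA burst of 4 frames must equal DMARDLR + 1");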
52 
53 /* DMA transfer information, relevant on callback invocations from the DMA
54  * driver. */
55 typedef struct {
56  qm_spi_t spi_id; /**< SPI controller identifier. */
57  qm_dma_channel_id_t dma_channel_id; /**< Used DMA channel. */
58  volatile bool cb_pending; /**< True if waiting for DMA callback. */
59 } dma_context_t;
60 
61 /**
62  * Extern qm_spi_reg_t* array declared in qm_soc_regs.h.
63  */
64 #ifndef UNIT_TEST
65 #if (QUARK_SE)
66 qm_spi_reg_t *qm_spi_controllers[QM_SPI_NUM] = {
67  (qm_spi_reg_t *)QM_SPI_MST_0_BASE, (qm_spi_reg_t *)QM_SPI_MST_1_BASE,
68  (qm_spi_reg_t *)QM_SPI_SLV_BASE};
69 #elif(QUARK_D2000)
70 qm_spi_reg_t *qm_spi_controllers[QM_SPI_NUM] = {
71  (qm_spi_reg_t *)QM_SPI_MST_0_BASE, (qm_spi_reg_t *)QM_SPI_SLV_BASE};
72 #endif
73 #endif
74 
75 static const volatile qm_spi_async_transfer_t *spi_async_transfer[QM_SPI_NUM];
76 static volatile uint16_t tx_counter[QM_SPI_NUM];
77 static volatile uint16_t rx_counter[QM_SPI_NUM];
78 static uint8_t dfs[QM_SPI_NUM];
79 static const uint32_t tx_dummy_frame = 0;
80 static qm_spi_tmode_t tmode[QM_SPI_NUM];
81 static qm_spi_frame_format_t frf[QM_SPI_NUM];
82 /* DMA (memory to SPI controller) callback information. */
83 static dma_context_t dma_context_tx[QM_SPI_NUM];
84 /* DMA (SPI controller to memory) callback information. */
85 static dma_context_t dma_context_rx[QM_SPI_NUM];
86 /* DMA core being used by each SPI controller. */
87 static qm_dma_t dma_core[QM_SPI_NUM];
88 
89 static void read_frame(const qm_spi_t spi, uint8_t *const rx_buffer)
90 {
91  const qm_spi_reg_t *const controller = QM_SPI[spi];
92  const uint8_t frame_size = dfs[spi];
93 
94  if (frame_size == 1) {
95  *(uint8_t *)rx_buffer = controller->dr[0];
96  } else if (frame_size == 2) {
97  *(uint16_t *)rx_buffer = controller->dr[0];
98  } else {
99  *(uint32_t *)rx_buffer = controller->dr[0];
100  }
101 }
102 
103 static void write_frame(const qm_spi_t spi, const uint8_t *const tx_buffer)
104 {
105  qm_spi_reg_t *const controller = QM_SPI[spi];
106  const uint8_t frame_size = dfs[spi];
107 
108  if (frame_size == 1) {
109  controller->dr[0] = *(uint8_t *)tx_buffer;
110  } else if (frame_size == 2) {
111  controller->dr[0] = *(uint16_t *)tx_buffer;
112  } else {
113  controller->dr[0] = *(uint32_t *)tx_buffer;
114  }
115 }
116 
117 static void wait_for_controller(const qm_spi_reg_t *const controller)
118 {
119  /**
120  * Poll the TFE status bit until it reads 1 (TX FIFO empty)
121  * before checking QM_SPI_SR_BUSY.
122  */
123  while (!(controller->sr & QM_SPI_SR_TFE))
124  ;
125  while (controller->sr & QM_SPI_SR_BUSY)
126  ;
127 }
128 
129 /*
130  * Service a RX FIFO Full interrupt on master side.
131  */
132 static __inline__ void handle_mst_rx_interrupt(const qm_spi_t spi)
133 {
134  qm_spi_reg_t *const controller = QM_SPI[spi];
135  const volatile qm_spi_async_transfer_t *const transfer =
136  spi_async_transfer[spi];
137 
138  /* Jump to the right position of RX buffer.
139  * If no bytes were received before, we start from the beginning,
140  * otherwise we jump to the next available frame position.
141  */
142  uint8_t *rx_buffer = transfer->rx + (rx_counter[spi] * dfs[spi]);
143 
144  while (controller->rxflr) {
145  read_frame(spi, rx_buffer);
146  rx_buffer += dfs[spi];
147  rx_counter[spi]++;
148 
149  /* Check that there's not more data in the FIFO than we had
150  * requested.
151  */
152  if (transfer->rx_len == rx_counter[spi]) {
153  if (tmode[spi] == QM_SPI_TMOD_RX) {
154  controller->imr = QM_SPI_IMR_MASK_ALL;
155  controller->ssienr = 0;
156  if (transfer->callback) {
157  transfer->callback(
158  transfer->callback_data, 0,
159  QM_SPI_IDLE, transfer->rx_len);
160  }
161  } else {
162  controller->imr &=
163  ~(QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM |
164  QM_SPI_IMR_RXFIM);
165  }
166  break;
167  }
168  }
169 
170  /* Check if enough data will arrive to trigger an interrupt and adjust
171  * rxftlr accordingly.
172  */
173  const uint32_t frames_left = transfer->rx_len - rx_counter[spi];
174  if (frames_left <= controller->rxftlr) {
175  controller->rxftlr = frames_left - 1;
176  }
177 }
178 
179 /**
180  * Service a TX FIFO Empty interrupt on master side.
181  *
182  * @brief Service a TX FIFO Empty interrupt on master side.
183  * @param[in] spi SPI controller identifier.
184  */
185 static __inline__ void handle_mst_tx_interrupt(const qm_spi_t spi)
186 {
187  qm_spi_reg_t *const controller = QM_SPI[spi];
188  const volatile qm_spi_async_transfer_t *const transfer =
189  spi_async_transfer[spi];
190 
191  /* Jump to the right position of TX buffer.
192  * If no bytes were transmitted before, we start from the beginning,
193  * otherwise we jump to the next frame to be sent.
194  */
195  const uint8_t *tx_buffer = transfer->tx + (tx_counter[spi] * dfs[spi]);
196 
197  int frames =
198  SPI_FIFOS_DEPTH - controller->txflr - controller->rxflr - 1;
199 
200  while (frames > 0) {
201  write_frame(spi, tx_buffer);
202  tx_buffer += dfs[spi];
203  tx_counter[spi]++;
204  frames--;
205 
206  if (transfer->tx_len == tx_counter[spi]) {
207  controller->txftlr = 0;
208  break;
209  }
210  }
211 }
212 
213 /*
214  * Service a RX FIFO Full interrupt on slave side.
215  */
216 static __inline__ void handle_slv_rx_interrupt(const qm_spi_t spi)
217 {
218  qm_spi_reg_t *const controller = QM_SPI[spi];
219  const volatile qm_spi_async_transfer_t *const transfer =
220  spi_async_transfer[spi];
221 
222  uint8_t *rx_buffer = transfer->rx + (rx_counter[spi] * dfs[spi]);
223  uint16_t len = transfer->rx_len;
224  int32_t frames_left = 0;
225 
226  while (controller->rxflr) {
227 
228  if (rx_counter[spi] < len) {
229 
230  read_frame(spi, rx_buffer);
231  rx_buffer += dfs[spi];
232  rx_counter[spi]++;
233 
234  if (rx_counter[spi] == len) {
235  /* Application notification. */
236  if (transfer->callback) {
237  /*
238  * Application can now read received
239  * data. In order to receive more
240  * data, the application needs to
241  * call the update function.
242  */
243  transfer->callback(
244  transfer->callback_data, 0,
245  QM_SPI_RX_FULL, len);
246  /*
247  * RX counter is zero if the application
248  * has called the update function.
249  */
250  if (!rx_counter[spi]) {
251  /*
252  * Update transfer information.
253  */
254  rx_buffer = transfer->rx;
255  len = transfer->rx_len;
256  } else {
257  break;
258  }
259  }
260  }
261  } else {
262  break;
263  }
264  }
265 
266  frames_left = len - rx_counter[spi];
267  if (frames_left > 0 && (uint32_t)frames_left <= controller->rxftlr) {
268  controller->rxftlr = frames_left - 1;
269  }
270 }
271 
272 /*
273  * Service a TX FIFO Empty interrupt on slave side.
274  */
275 static __inline__ void handle_slv_tx_interrupt(const qm_spi_t spi)
276 {
277  qm_spi_reg_t *const controller = QM_SPI[spi];
278  const volatile qm_spi_async_transfer_t *const transfer =
279  spi_async_transfer[spi];
280  const uint8_t *tx_buffer = transfer->tx + (tx_counter[spi] * dfs[spi]);
281  uint16_t len = transfer->tx_len;
282  int entries_free =
283  SPI_FIFOS_DEPTH - controller->txflr - controller->rxflr - 1;
284 
285  while (entries_free > 0 && tx_counter[spi] < len) {
286 
287  write_frame(spi, tx_buffer);
288  tx_buffer += dfs[spi];
289 
290  entries_free--;
291  tx_counter[spi]++;
292 
293  if (tx_counter[spi] == len) {
294  /* Application notification. */
295  if (transfer->callback) {
296  /*
297  * In order to transmit more data, the
298  * application needs to call the update
299  * function.
300  */
301  transfer->callback(transfer->callback_data, 0,
302  QM_SPI_TX_EMPTY, len);
303 
304  /*
305  * TX counter is zero if the application
306  * has called the update function.
307  */
308  if (!tx_counter[spi]) {
309  /* Update transfer information. */
310  tx_buffer = transfer->tx;
311  len = transfer->tx_len;
312  }
313  }
314  }
315  }
316 
317  if (tx_counter[spi] >= len) {
318  controller->txftlr = 0;
319  }
320 }
321 
322 static void handle_spi_overflow_interrupt(const qm_spi_t spi)
323 {
324  qm_spi_reg_t *const controller = QM_SPI[spi];
325  const volatile qm_spi_async_transfer_t *transfer =
326  spi_async_transfer[spi];
327 
328  if (transfer->callback) {
329  transfer->callback(transfer->callback_data, -EIO,
330  QM_SPI_RX_OVERFLOW, rx_counter[spi]);
331  }
332 
333  /* Clear RX FIFO Overflow interrupt. */
334  controller->rxoicr;
335  controller->imr = QM_SPI_IMR_MASK_ALL;
336  controller->ssienr = 0;
337 }
338 
339 static void handle_spi_mst_interrupt(const qm_spi_t spi)
340 {
341  qm_spi_reg_t *const controller = QM_SPI[spi];
342  const volatile qm_spi_async_transfer_t *transfer =
343  spi_async_transfer[spi];
344  const uint32_t int_status = controller->isr;
345 
346  QM_ASSERT((int_status & (QM_SPI_ISR_TXOIS | QM_SPI_ISR_RXUIS)) == 0);
347 
348  /* RX FIFO Overflow interrupt. */
349  if (int_status & QM_SPI_ISR_RXOIS) {
350  handle_spi_overflow_interrupt(spi);
351  return;
352  }
353 
354  if (int_status & QM_SPI_ISR_RXFIS) {
355  handle_mst_rx_interrupt(spi);
356  }
357 
358  if (transfer->rx_len == rx_counter[spi] &&
359  transfer->tx_len == tx_counter[spi] &&
360  (controller->sr & QM_SPI_SR_TFE) &&
361  !(controller->sr & QM_SPI_SR_BUSY)) {
362  controller->imr = QM_SPI_IMR_MASK_ALL;
363  controller->ssienr = 0;
364 
365  if (transfer->callback && tmode[spi] != QM_SPI_TMOD_RX) {
366  transfer->callback(transfer->callback_data, 0,
367  QM_SPI_IDLE, transfer->tx_len);
368  }
369 
370  return;
371  }
372 
373  if (int_status & QM_SPI_ISR_TXEIS &&
374  transfer->tx_len > tx_counter[spi]) {
375  handle_mst_tx_interrupt(spi);
376  }
377 }
378 
379 static void handle_spi_slv_interrupt(const qm_spi_t spi)
380 {
381  qm_spi_reg_t *const controller = QM_SPI[spi];
382  const volatile qm_spi_async_transfer_t *transfer =
383  spi_async_transfer[spi];
384  const uint32_t int_status = controller->isr;
385 
386  QM_ASSERT((int_status & (QM_SPI_ISR_TXOIS | QM_SPI_ISR_RXUIS)) == 0);
387 
388  if (int_status & QM_SPI_ISR_RXOIS) {
389  /* RX FIFO Overflow interrupt. */
390  handle_spi_overflow_interrupt(spi);
391  return;
392  }
393 
394  if (int_status & QM_SPI_ISR_RXFIS) {
395  /* RX FIFO Full interrupt. */
396  handle_slv_rx_interrupt(spi);
397  }
398 
399  if (transfer->rx_len == rx_counter[spi] &&
400  transfer->tx_len == tx_counter[spi] &&
401  (controller->sr & QM_SPI_SR_TFE) &&
402  (!(controller->sr & QM_SPI_SR_BUSY) ||
403  tmode[spi] == QM_SPI_TMOD_RX)) {
404  /* End of communication. */
405  if (!transfer->keep_enabled) {
406  controller->ssienr = 0;
407  }
408  controller->imr = QM_SPI_IMR_MASK_ALL;
409  /* Application notification. */
410  if (transfer->callback) {
411  transfer->callback(transfer->callback_data, 0,
412  QM_SPI_IDLE, 0);
413  }
414  return;
415  }
416 
417  if (int_status & QM_SPI_ISR_TXEIS) {
418  /* TX FIFO Empty interrupt. */
419  handle_slv_tx_interrupt(spi);
420  }
421 }
422 
423 int qm_spi_set_config(const qm_spi_t spi, const qm_spi_config_t *cfg)
424 {
425  QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
426  QM_CHECK(cfg, -EINVAL);
427  QM_CHECK(QM_SPI_SLV_0 == spi
428  ? cfg->transfer_mode != QM_SPI_TMOD_EEPROM_READ
429  : 1,
430  -EINVAL);
431 
432  if (0 != QM_SPI[spi]->ssienr) {
433  return -EBUSY;
434  }
435 
436  qm_spi_reg_t *const controller = QM_SPI[spi];
437 
438  /* Apply the selected cfg options. */
439  controller->ctrlr0 = (cfg->frame_size << QM_SPI_CTRLR0_DFS_32_OFFSET) |
440  (cfg->transfer_mode << QM_SPI_CTRLR0_TMOD_OFFSET) |
441  (cfg->bus_mode << QM_SPI_CTRLR0_SCPOL_SCPH_OFFSET);
442 
443  /*
444  * If the device is configured as a slave, an external master will
445  * set the baud rate.
446  */
447  if (QM_SPI_SLV_0 != spi) {
448  controller->baudr = cfg->clk_divider;
449  }
450 
451  /* Keep the current data frame size in bytes, being:
452  * - 1 byte for DFS set from 4 to 8 bits;
453  * - 2 bytes for DFS set from 9 to 16 bits;
454  * - 3 bytes for DFS set from 17 to 24 bits;
455  * - 4 bytes for DFS set from 25 to 32 bits.
456  */
457  dfs[spi] = (cfg->frame_size / 8) + 1;
458 
459  tmode[spi] = cfg->transfer_mode;
460  frf[spi] = cfg->frame_format;
461 
462  return 0;
463 }
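For reference, a master-side configuration using this function might look as follows. This is a hedged sketch rather than code from this file; the example function name and the clock divider value are assumptions, and the resulting SCK frequency depends on the system clock.

static int example_spi_master_config(void)
{
	qm_spi_config_t cfg;

	cfg.frame_size = QM_SPI_FRAME_SIZE_8_BIT;
	cfg.transfer_mode = QM_SPI_TMOD_TX_RX;
	cfg.bus_mode = QM_SPI_BMODE_0; /* Clock polarity 0, phase 0. */
	cfg.frame_format = QM_SPI_FRAME_FORMAT_STANDARD;
	cfg.clk_divider = 32; /* Assumed divider; actual SCK depends on sysclk. */

	/* Returns -EBUSY if the controller is currently enabled. */
	return qm_spi_set_config(QM_SPI_MST_0, &cfg);
}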
464 
465 int qm_spi_slave_select(const qm_spi_t spi, const qm_spi_slave_select_t ss)
466 {
467  QM_CHECK((spi < QM_SPI_NUM) && (spi != QM_SPI_SLV_0), -EINVAL);
468 
469  /* Check if the device reports as busy. */
470  if (QM_SPI[spi]->sr & QM_SPI_SR_BUSY) {
471  return -EBUSY;
472  }
473 
474  QM_SPI[spi]->ser = ss;
475 
476  return 0;
477 }
478 
479 int qm_spi_get_status(const qm_spi_t spi, qm_spi_status_t *const status)
480 {
481  QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
482  QM_CHECK(status, -EINVAL);
483 
484  qm_spi_reg_t *const controller = QM_SPI[spi];
485 
486  if (controller->sr & QM_SPI_SR_BUSY) {
487  *status = QM_SPI_BUSY;
488  } else {
489  *status = QM_SPI_IDLE;
490  }
491 
492  if (controller->risr & QM_SPI_RISR_RXOIR) {
493  *status = QM_SPI_RX_OVERFLOW;
494  }
495 
496  return 0;
497 }
498 
499 int qm_spi_transfer(const qm_spi_t spi, const qm_spi_transfer_t *const xfer,
500  qm_spi_status_t *const status)
501 {
502  QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
503  QM_CHECK(xfer, -EINVAL);
504  QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX
505  ? (xfer->tx_len == xfer->rx_len)
506  : 1,
507  -EINVAL);
508  QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX ? (xfer->rx_len == 0) : 1,
509  -EINVAL);
510  QM_CHECK(((tmode[spi] == QM_SPI_TMOD_RX ? (xfer->tx_len == 0) : 1) ||
511  (((tmode[spi] == QM_SPI_TMOD_RX) &&
512  (QM_SPI_FRAME_FORMAT_STANDARD != frf[spi])))),
513  -EINVAL);
514  QM_CHECK(tmode[spi] == QM_SPI_TMOD_EEPROM_READ
515  ? (xfer->tx_len && xfer->rx_len)
516  : 1,
517  -EINVAL);
518 
519  uint32_t i_tx = xfer->tx_len;
520  uint32_t i_rx = xfer->rx_len;
521  int rc = 0;
522 
523  qm_spi_reg_t *const controller = QM_SPI[spi];
524 
525  /* Wait for the SPI device to become available. */
526  wait_for_controller(controller);
527 
528  /* Mask all interrupts; this is a blocking function. */
529  controller->imr = QM_SPI_IMR_MASK_ALL;
530 
531  /* If we are in RX only or EEPROM Read mode, the ctrlr1 reg holds how
532  * many data frames the controller solicits, minus 1.
533  */
534  if (xfer->rx_len) {
535  controller->ctrlr1 = xfer->rx_len - 1;
536  }
537 
538  /* Enable SPI device. */
539  controller->ssienr = QM_SPI_SSIENR_SSIENR;
540 
541  /* Transfer is only complete when all the tx data is sent and all
542  * expected rx data has been received.
543  */
544  uint8_t *rx_buffer = xfer->rx;
545  const uint8_t *tx_buffer = xfer->tx;
546 
547  /* RX Only transfers need a dummy frame to be sent for starting. */
548  if ((tmode[spi] == QM_SPI_TMOD_RX) &&
549  (QM_SPI_FRAME_FORMAT_STANDARD == frf[spi])) {
550  tx_buffer = (uint8_t *)&tx_dummy_frame;
551  i_tx = 1;
552  }
553 
554  while (i_tx || i_rx) {
555  if (controller->risr & QM_SPI_RISR_RXOIR) {
556  rc = -EIO;
557  if (status) {
558  *status |= QM_SPI_RX_OVERFLOW;
559  }
560  controller->rxoicr;
561  break;
562  }
563 
564  if (i_rx && (controller->sr & QM_SPI_SR_RFNE)) {
565  read_frame(spi, rx_buffer);
566  rx_buffer += dfs[spi];
567  i_rx--;
568  }
569 
570  if (i_tx && (controller->sr & QM_SPI_SR_TFNF)) {
571  write_frame(spi, tx_buffer);
572  tx_buffer += dfs[spi];
573  i_tx--;
574  }
575  }
576  wait_for_controller(controller);
577 
578  /* Disable SPI Device. */
579  controller->ssienr = 0;
580 
581  return rc;
582 }
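A usage sketch of the blocking API (not from the original sources): select a slave and perform a polled 4-frame write/read. QM_SPI_SS_0 refers to the first slave-select line; the buffer contents and the example function name are assumptions.

static int example_spi_polled_transfer(void)
{
	uint8_t tx_buf[4] = {0xDE, 0xAD, 0xBE, 0xEF};
	uint8_t rx_buf[4] = {0};
	qm_spi_status_t status;
	qm_spi_transfer_t polled_xfer;

	polled_xfer.tx = tx_buf;
	polled_xfer.tx_len = sizeof(tx_buf); /* Frame count (1 byte per frame). */
	polled_xfer.rx = rx_buf;
	polled_xfer.rx_len = sizeof(rx_buf); /* Must equal tx_len in TX_RX mode. */

	qm_spi_slave_select(QM_SPI_MST_0, QM_SPI_SS_0);

	/* Returns -EIO if an RX FIFO overflow was detected. */
	return qm_spi_transfer(QM_SPI_MST_0, &polled_xfer, &status);
}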
583 
584 int qm_spi_irq_update(const qm_spi_t spi,
585  const volatile qm_spi_async_transfer_t *const xfer,
586  const qm_spi_update_t update)
587 {
588  QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
589  QM_CHECK(xfer, -EINVAL);
590  QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX ? (xfer->rx_len == 0) : 1,
591  -EINVAL);
592  QM_CHECK(((tmode[spi] == QM_SPI_TMOD_RX ? (xfer->tx_len == 0) : 1) ||
593  (((tmode[spi] == QM_SPI_TMOD_RX) &&
594  (QM_SPI_FRAME_FORMAT_STANDARD != frf[spi])))),
595  -EINVAL);
596  QM_CHECK(tmode[spi] == QM_SPI_TMOD_EEPROM_READ
597  ? (xfer->tx_len && xfer->rx_len)
598  : 1,
599  -EINVAL);
600  QM_CHECK((spi != QM_SPI_SLV_0) ||
601  (tmode[spi] != QM_SPI_TMOD_EEPROM_READ),
602  -EINVAL);
603  QM_CHECK(update == QM_SPI_UPDATE_TX || update == QM_SPI_UPDATE_RX ||
604  update == (QM_SPI_UPDATE_TX | QM_SPI_UPDATE_RX),
605  -EINVAL);
606  /* If updating only TX, then the mode shall not be RX. */
607  QM_CHECK((update & QM_SPI_UPDATE_TX) ? (tmode[spi] != QM_SPI_TMOD_RX)
608  : 1,
609  -EINVAL);
610  /* If updating only RX, then the mode shall not be TX. */
611  QM_CHECK((update & QM_SPI_UPDATE_RX) ? (tmode[spi] != QM_SPI_TMOD_TX)
612  : 1,
613  -EINVAL);
614 
615  qm_spi_reg_t *const controller = QM_SPI[spi];
616  spi_async_transfer[spi] = xfer;
617 
618  if (update == QM_SPI_UPDATE_RX) {
619  rx_counter[spi] = 0;
620  /* Unmask RX interrupt sources. */
621  controller->imr =
622  QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM | QM_SPI_IMR_RXFIM;
623  } else if (update == QM_SPI_UPDATE_TX) {
624  tx_counter[spi] = 0;
625  /* Unmask TX interrupt sources. */
626  controller->imr = QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM;
627  } else {
628  rx_counter[spi] = 0;
629  tx_counter[spi] = 0;
630  /* Unmask both TX and RX interrupt sources. */
631  controller->imr = QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM |
632  QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM |
633  QM_SPI_IMR_RXFIM;
634  }
635 
636  return 0;
637 }
638 
639 int qm_spi_irq_transfer(const qm_spi_t spi,
640  const volatile qm_spi_async_transfer_t *const xfer)
641 {
642  qm_spi_update_t update = 0;
643  QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
644  QM_CHECK(xfer, -EINVAL);
645  QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX
646  ? (xfer->tx_len == xfer->rx_len)
647  : 1,
648  -EINVAL);
649 
650  qm_spi_reg_t *const controller = QM_SPI[spi];
651 
652  if ((tmode[spi] == QM_SPI_TMOD_RX) ||
653  (tmode[spi] == QM_SPI_TMOD_TX_RX) ||
654  (tmode[spi] == QM_SPI_TMOD_EEPROM_READ)) {
655  update |= QM_SPI_UPDATE_RX;
656  }
657  if ((tmode[spi] == QM_SPI_TMOD_TX) ||
658  (tmode[spi] == QM_SPI_TMOD_TX_RX) ||
659  (tmode[spi] == QM_SPI_TMOD_EEPROM_READ)) {
660  update |= QM_SPI_UPDATE_TX;
661  }
662 
663  rx_counter[spi] = 0;
664  tx_counter[spi] = 0;
665  qm_spi_irq_update(spi, xfer, update);
666 
667  if (QM_SPI_SLV_0 != spi) {
668  /*
669  * If we are in RX only or EEPROM Read mode, the ctrlr1 reg
670  * holds how many data frames the controller solicits, minus 1.
671  * We also set the same value into rxftlr, so the controller only
672  * triggers an RX_FIFO_FULL interrupt when all frames are
673  * available at the FIFO for consumption.
674  */
675  if (xfer->rx_len) {
676  controller->ctrlr1 = xfer->rx_len - 1;
677  controller->rxftlr = (xfer->rx_len < SPI_FIFOS_DEPTH)
678  ? xfer->rx_len - 1
679  : SPI_MST_DEFAULT_RX_THRESHOLD;
680  }
681  controller->txftlr = SPI_MST_DEFAULT_TX_THRESHOLD;
682  } else {
683  if (xfer->rx_len) {
684  controller->rxftlr =
685  (xfer->rx_len < SPI_SLV_DEFAULT_RX_THRESHOLD)
686  ? xfer->rx_len - 1
687  : SPI_SLV_DEFAULT_RX_THRESHOLD;
688  }
689  controller->txftlr = SPI_SLV_DEFAULT_TX_THRESHOLD;
690 
691  if (QM_SPI_TMOD_RX != tmode[spi]) {
692  /* Enable MISO line. */
693  controller->ctrlr0 &= ~QM_SPI_CTRLR0_SLV_OE;
694  } else {
695  /* Disable MISO line. */
696  controller->ctrlr0 |= QM_SPI_CTRLR0_SLV_OE;
697  }
698  }
699 
700  /* Enable SPI controller. */
701  controller->ssienr = QM_SPI_SSIENR_SSIENR;
702 
703  if ((QM_SPI_SLV_0 != spi && QM_SPI_TMOD_RX == tmode[spi]) &&
704  (QM_SPI_FRAME_FORMAT_STANDARD == frf[spi])) {
705  /*
706  * In RX only, master is required to send
707  * a dummy frame in order to start the
708  * communication.
709  */
710  write_frame(spi, (uint8_t *)&tx_dummy_frame);
711  }
712 
713  return 0;
714 }
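For illustration, an interrupt-based master transfer could be started as in the sketch below. The callback and buffer names are assumptions, and the SPI master 0 ISR declared later in this file is assumed to have been registered with the corresponding interrupt vector beforehand.

static void spi_example_cb(void *data, int error, qm_spi_status_t status,
			   uint16_t len)
{
	/* error == 0 together with QM_SPI_IDLE indicates a completed transfer;
	 * len reports the number of frames handled. */
	(void)data;
	(void)error;
	(void)status;
	(void)len;
}

static uint8_t irq_tx[8], irq_rx[8];
static volatile qm_spi_async_transfer_t irq_xfer;

static int example_spi_irq_start(void)
{
	irq_xfer.tx = irq_tx;
	irq_xfer.tx_len = sizeof(irq_tx);
	irq_xfer.rx = irq_rx;
	irq_xfer.rx_len = sizeof(irq_rx);
	irq_xfer.callback = spi_example_cb;
	irq_xfer.callback_data = NULL;

	return qm_spi_irq_transfer(QM_SPI_MST_0, &irq_xfer);
}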
715 
716 QM_ISR_DECLARE(qm_spi_master_0_isr)
717 {
718  handle_spi_mst_interrupt(QM_SPI_MST_0);
719  QM_ISR_EOI(QM_IRQ_SPI_MASTER_0_INT_VECTOR);
720 }
721 
722 #if (QUARK_SE)
723 QM_ISR_DECLARE(qm_spi_master_1_isr)
724 {
725  handle_spi_mst_interrupt(QM_SPI_MST_1);
726  QM_ISR_EOI(QM_IRQ_SPI_MASTER_1_INT_VECTOR);
727 }
728 #endif
729 
730 QM_ISR_DECLARE(qm_spi_slave_0_isr)
731 {
732  handle_spi_slv_interrupt(QM_SPI_SLV_0);
733  QM_ISR_EOI(QM_IRQ_SPI_SLAVE_0_INT_VECTOR);
734 }
735 
736 int qm_spi_irq_transfer_terminate(const qm_spi_t spi)
737 {
738  QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
739 
740  qm_spi_reg_t *const controller = QM_SPI[spi];
741  const volatile qm_spi_async_transfer_t *const transfer =
742  spi_async_transfer[spi];
743 
744  /* Mask the interrupts. */
745  controller->imr = QM_SPI_IMR_MASK_ALL;
746  /* Read how many frames are still on TX queue. */
747  uint16_t tx_fifo_frames = controller->txflr;
748  /* Disable SPI device. */
749  controller->ssienr = 0;
750 
751  if (transfer->callback) {
752  uint16_t len = 0;
753  if (tmode[spi] == QM_SPI_TMOD_TX) {
754  if (tx_counter[spi] > tx_fifo_frames) {
755  len = tx_counter[spi] - tx_fifo_frames;
756  } else {
757  len = 0;
758  }
759  } else {
760  len = rx_counter[spi];
761  }
762  /*
763  * NOTE: change this to return controller-specific code
764  * 'user aborted'.
765  */
766  transfer->callback(transfer->callback_data, -ECANCELED,
767  QM_SPI_IDLE, len);
768  }
769 
770  tx_counter[spi] = 0;
771  rx_counter[spi] = 0;
772 
773  return 0;
774 }
775 
776 /* DMA driver invoked callback. */
777 static void spi_dma_callback(void *callback_context, uint32_t len,
778  int error_code)
779 {
780  QM_ASSERT(callback_context);
781 
782  int client_error = 0;
783  uint32_t frames_expected;
784  volatile bool *cb_pending_alternate_p;
785 
786  /* The DMA driver returns a pointer to a dma_context struct from which
787  * we find out the corresponding SPI device and transfer direction.
788  */
789  dma_context_t *const dma_context_p = callback_context;
790  const qm_spi_t spi = dma_context_p->spi_id;
791  QM_ASSERT(spi < QM_SPI_NUM);
792  qm_spi_reg_t *const controller = QM_SPI[spi];
793  const volatile qm_spi_async_transfer_t *const transfer =
794  spi_async_transfer[spi];
795  QM_ASSERT(transfer);
796  const uint8_t frame_size = dfs[spi];
797  QM_ASSERT((frame_size == 1) || (frame_size == 2) || (frame_size == 4));
798 
799  /* DMA driver returns length in bytes but user expects number of frames.
800  */
801  const uint32_t frames_transfered = len / frame_size;
802 
803  QM_ASSERT((dma_context_p == &dma_context_tx[spi]) ||
804  (dma_context_p == &dma_context_rx[spi]));
805 
806  if (dma_context_p == &dma_context_tx[spi]) {
807  /* TX transfer. */
808  frames_expected = transfer->tx_len;
809  cb_pending_alternate_p = &dma_context_rx[spi].cb_pending;
810  } else {
811  /* RX transfer. */
812  frames_expected = transfer->rx_len;
813  cb_pending_alternate_p = &dma_context_tx[spi].cb_pending;
814  }
815 
816  QM_ASSERT(cb_pending_alternate_p);
817  QM_ASSERT(dma_context_p->cb_pending);
818  dma_context_p->cb_pending = false;
819 
820  if (error_code) {
821  /* Transfer failed, pass to client the error code returned by
822  * the DMA driver.
823  */
824  client_error = error_code;
825  } else if (false == *cb_pending_alternate_p) {
826  /* TX transfers invoke the callback before the TX data has been
827  * transmitted, so we need to wait here.
828  */
829  wait_for_controller(controller);
830 
831  if (frames_transfered != frames_expected) {
832  QM_ASSERT(frames_transfered < frames_expected);
833  /* Callback triggered through a transfer terminate. */
834  client_error = -ECANCELED;
835  }
836  } else {
837  /* Controller busy due to alternate DMA channel active. */
838  return;
839  }
840 
841  /* Disable DMA setting and SPI controller. */
842  controller->dmacr = 0;
843  controller->ssienr = 0;
844 
845  if (transfer->callback) {
846  transfer->callback(transfer->callback_data, client_error,
847  QM_SPI_IDLE, frames_transfered);
848  }
849 }
850 
851 int qm_spi_dma_channel_config(
852  const qm_spi_t spi, const qm_dma_t dma_ctrl_id,
853  const qm_dma_channel_id_t dma_channel_id,
854  const qm_dma_channel_direction_t dma_channel_direction)
855 {
856  QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
857  QM_CHECK(dma_ctrl_id < QM_DMA_NUM, -EINVAL);
858  QM_CHECK(dma_channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);
859 
860  dma_context_t *dma_context_p = NULL;
861  qm_dma_channel_config_t dma_chan_cfg = {0};
862  dma_chan_cfg.handshake_polarity = QM_DMA_HANDSHAKE_POLARITY_HIGH;
863  dma_chan_cfg.channel_direction = dma_channel_direction;
864  dma_chan_cfg.client_callback = spi_dma_callback;
865  dma_chan_cfg.transfer_type = QM_DMA_TYPE_SINGLE;
866 
867  /* Every data transfer performed by the DMA core corresponds to an SPI
868  * data frame; the SPI uses the number of bits determined by a previous
869  * qm_spi_set_config call where the frame size was specified.
870  */
871  switch (dfs[spi]) {
872  case 1:
873  dma_chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_8;
874  break;
875 
876  case 2:
877  dma_chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_16;
878  break;
879 
880  case 4:
881  dma_chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_32;
882  break;
883 
884  default:
885  /* The DMA core cannot handle 3 byte frame sizes. */
886  return -EINVAL;
887  }
888  dma_chan_cfg.destination_transfer_width =
889  dma_chan_cfg.source_transfer_width;
890 
891  switch (dma_channel_direction) {
892  case QM_DMA_MEMORY_TO_PERIPHERAL:
893
894 #if (QUARK_SE)
895  dma_chan_cfg.handshake_interface =
896  (QM_SPI_MST_0 == spi) ? DMA_HW_IF_SPI_MASTER_0_TX
897  : DMA_HW_IF_SPI_MASTER_1_TX;
898 #else
899  dma_chan_cfg.handshake_interface = DMA_HW_IF_SPI_MASTER_0_TX;
900 #endif
901 
902  /* The DMA burst length has to fit in the space remaining in the
903  * TX FIFO after the watermark level, DMATDLR.
904  */
905  dma_chan_cfg.source_burst_length = SPI_DMA_WRITE_BURST_LENGTH;
906  dma_chan_cfg.destination_burst_length =
907  SPI_DMA_WRITE_BURST_LENGTH;
908 
909  dma_context_p = &dma_context_tx[spi];
910  break;
911 
912  case QM_DMA_PERIPHERAL_TO_MEMORY:
913
914 #if (QUARK_SE)
915  dma_chan_cfg.handshake_interface =
916  (QM_SPI_MST_0 == spi) ? DMA_HW_IF_SPI_MASTER_0_RX
917  : DMA_HW_IF_SPI_MASTER_1_RX;
918 #else
919  dma_chan_cfg.handshake_interface = DMA_HW_IF_SPI_MASTER_0_RX;
920 #endif
921  /* The DMA burst length has to match the value of the receive
922  * watermark level, DMARDLR + 1.
923  */
924  dma_chan_cfg.source_burst_length = SPI_DMA_READ_BURST_LENGTH;
925  dma_chan_cfg.destination_burst_length =
926  SPI_DMA_READ_BURST_LENGTH;
927 
928  dma_context_p = &dma_context_rx[spi];
929  break;
930 
931  default:
932  /* Memory to memory not allowed on SPI transfers. */
933  return -EINVAL;
934  }
935 
936  /* The DMA driver needs a callback context pointer so that later we
937  * can identify which SPI controller the DMA callback corresponds to,
938  * as well as whether we are dealing with a TX or RX dma_context
939  * struct.
940  */
941  QM_ASSERT(dma_context_p);
942  dma_chan_cfg.callback_context = dma_context_p;
943 
944  /* To be used when the DMA callback is received. */
945  dma_context_p->spi_id = spi;
946  dma_context_p->dma_channel_id = dma_channel_id;
947 
948  /* To be used on transfer setup. */
949  dma_core[spi] = dma_ctrl_id;
950 
951  return qm_dma_channel_set_config(dma_ctrl_id, dma_channel_id,
952  &dma_chan_cfg);
953 }
954 
955 int qm_spi_dma_transfer(const qm_spi_t spi,
956  const qm_spi_async_transfer_t *const xfer)
957 {
958  QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
959  QM_CHECK(xfer, -EINVAL);
960  QM_CHECK(xfer->tx_len
961  ? (xfer->tx &&
962  dma_context_tx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
963  : 1,
964  -EINVAL);
965  QM_CHECK(xfer->rx_len
966  ? (xfer->rx &&
967  dma_context_rx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
968  : 1,
969  -EINVAL);
970  QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX ? (xfer->tx && xfer->rx) : 1,
971  -EINVAL);
972  QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX
973  ? (xfer->tx_len == xfer->rx_len)
974  : 1,
975  -EINVAL);
976  QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX ? (xfer->tx_len && !xfer->rx_len)
977  : 1,
978  -EINVAL);
979  QM_CHECK(tmode[spi] == QM_SPI_TMOD_RX ? (xfer->rx_len && !xfer->tx_len)
980  : 1,
981  -EINVAL);
982  QM_CHECK(tmode[spi] == QM_SPI_TMOD_EEPROM_READ
983  ? (xfer->tx_len && xfer->rx_len)
984  : 1,
985  -EINVAL);
986  QM_CHECK(dma_core[spi] < QM_DMA_NUM, -EINVAL);
987 
988  int ret;
989  qm_dma_transfer_t dma_trans = {0};
990  qm_spi_reg_t *const controller = QM_SPI[spi];
991  if (0 != controller->ssienr) {
992  return -EBUSY;
993  }
994 
995  /* Mask interrupts. */
996  controller->imr = QM_SPI_IMR_MASK_ALL;
997 
998  if (xfer->rx_len) {
999  dma_trans.block_size = xfer->rx_len;
1000  dma_trans.source_address = (uint32_t *)&controller->dr[0];
1001  dma_trans.destination_address = (uint32_t *)xfer->rx;
1002  ret = qm_dma_transfer_set_config(
1003  dma_core[spi], dma_context_rx[spi].dma_channel_id,
1004  &dma_trans);
1005  if (ret) {
1006  return ret;
1007  }
1008 
1009  /* In RX-only or EEPROM mode, the ctrlr1 register holds how
1010  * many data frames the controller solicits, minus 1.
1011  */
1012  controller->ctrlr1 = xfer->rx_len - 1;
1013  }
1014 
1015  if (xfer->tx_len) {
1016  dma_trans.block_size = xfer->tx_len;
1017  dma_trans.source_address = (uint32_t *)xfer->tx;
1018  dma_trans.destination_address = (uint32_t *)&controller->dr[0];
1019  ret = qm_dma_transfer_set_config(
1020  dma_core[spi], dma_context_tx[spi].dma_channel_id,
1021  &dma_trans);
1022  if (ret) {
1023  return ret;
1024  }
1025  }
1026 
1027  /* Transfer pointer kept to extract user callback address and transfer
1028  * client id when DMA completes.
1029  */
1030  spi_async_transfer[spi] = xfer;
1031 
1032  /* Enable the SPI device. */
1033  controller->ssienr = QM_SPI_SSIENR_SSIENR;
1034 
1035  if (xfer->rx_len) {
1036  /* Enable receive DMA. */
1037  controller->dmacr |= QM_SPI_DMACR_RDMAE;
1038 
1039  /* Set the DMA receive threshold. */
1040  controller->dmardlr = SPI_DMARDLR_DMARDL;
1041 
1042  dma_context_rx[spi].cb_pending = true;
1043 
1044  ret = qm_dma_transfer_start(dma_core[spi],
1045  dma_context_rx[spi].dma_channel_id);
1046  if (ret) {
1047  dma_context_rx[spi].cb_pending = false;
1048 
1049  /* Disable DMA setting and SPI controller. */
1050  controller->dmacr = 0;
1051  controller->ssienr = 0;
1052  return ret;
1053  }
1054 
1055  if (!xfer->tx_len) {
1056  /* In RX-only mode we need to transfer an initial dummy
1057  * byte.
1058  */
1059  write_frame(spi, (uint8_t *)&tx_dummy_frame);
1060  }
1061  }
1062 
1063  if (xfer->tx_len) {
1064  /* Enable transmit DMA. */
1065  controller->dmacr |= QM_SPI_DMACR_TDMAE;
1066 
1067  /* Set the DMA transmit threshold. */
1068  controller->dmatdlr = SPI_DMATDLR_DMATDL;
1069 
1070  dma_context_tx[spi].cb_pending = true;
1071 
1072  ret = qm_dma_transfer_start(dma_core[spi],
1073  dma_context_tx[spi].dma_channel_id);
1074  if (ret) {
1075  dma_context_tx[spi].cb_pending = false;
1076  if (xfer->rx_len) {
1077  /* If a RX transfer was previously started, we
1078  * need to stop it - the SPI device will be
1079  * disabled when handling the DMA callback.
1080  */
1081  qm_spi_dma_transfer_terminate(spi);
1082  } else {
1083  /* Disable DMA setting and SPI controller.*/
1084  controller->dmacr = 0;
1085  controller->ssienr = 0;
1086  }
1087  return ret;
1088  }
1089  }
1090 
1091  return 0;
1092 }
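Tying the two DMA entry points together, a possible call sequence is sketched below: configure one channel per direction with qm_spi_dma_channel_config(), then start the transfer with qm_spi_dma_transfer(); completion is reported through the async transfer's callback. The function name, DMA controller and channel numbers are assumptions, and the DMA controller is assumed to have been initialised already.

static int example_spi_dma_start(const qm_spi_async_transfer_t *dma_xfer)
{
	int rc;

	/* Assumed mapping: DMA controller 0, channel 0 for TX, channel 1
	 * for RX. */
	rc = qm_spi_dma_channel_config(QM_SPI_MST_0, QM_DMA_0, QM_DMA_CHANNEL_0,
				       QM_DMA_MEMORY_TO_PERIPHERAL);
	if (rc) {
		return rc;
	}

	rc = qm_spi_dma_channel_config(QM_SPI_MST_0, QM_DMA_0, QM_DMA_CHANNEL_1,
				       QM_DMA_PERIPHERAL_TO_MEMORY);
	if (rc) {
		return rc;
	}

	/* dma_xfer carries buffers, frame counts and the completion callback,
	 * just like the IRQ transfer struct. */
	return qm_spi_dma_transfer(QM_SPI_MST_0, dma_xfer);
}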
1093 
1094 int qm_spi_dma_transfer_terminate(qm_spi_t spi)
1095 {
1096  QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
1097  QM_CHECK(dma_context_tx[spi].cb_pending
1098  ? (dma_context_tx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
1099  : 1,
1100  -EINVAL);
1101  QM_CHECK(dma_context_rx[spi].cb_pending
1102  ? (dma_context_rx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
1103  : 1,
1104  -EINVAL);
1105 
1106  int ret = 0;
1107 
1108  if (dma_context_tx[spi].cb_pending) {
1109  if (0 !=
1110  qm_dma_transfer_terminate(
1111  dma_core[spi], dma_context_tx[spi].dma_channel_id)) {
1112  ret = -EIO;
1113  }
1114  }
1115 
1116  if (dma_context_rx[spi].cb_pending) {
1117  if (0 !=
1118  qm_dma_transfer_terminate(
1119  dma_core[spi], dma_context_rx[spi].dma_channel_id)) {
1120  ret = -EIO;
1121  }
1122  }
1123 
1124  return ret;
1125 }
1126 
1127 #if (ENABLE_RESTORE_CONTEXT)
1128 int qm_spi_save_context(const qm_spi_t spi, qm_spi_context_t *const ctx)
1129 {
1130  QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
1131  QM_CHECK(ctx != NULL, -EINVAL);
1132 
1133  qm_spi_reg_t *const regs = QM_SPI[spi];
1134 
1135  ctx->ctrlr0 = regs->ctrlr0;
1136  ctx->ser = regs->ser;
1137  ctx->baudr = regs->baudr;
1138 
1139  return 0;
1140 }
1141 
1142 int qm_spi_restore_context(const qm_spi_t spi,
1143  const qm_spi_context_t *const ctx)
1144 {
1145  QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
1146  QM_CHECK(ctx != NULL, -EINVAL);
1147 
1148  qm_spi_reg_t *const regs = QM_SPI[spi];
1149 
1150  regs->ctrlr0 = ctx->ctrlr0;
1151  regs->ser = ctx->ser;
1152  regs->baudr = ctx->baudr;
1153 
1154  return 0;
1155 }
1156 #else
1157 int qm_spi_save_context(const qm_spi_t spi, qm_spi_context_t *const ctx)
1158 {
1159  (void)spi;
1160  (void)ctx;
1161 
1162  return 0;
1163 }
1164 
1165 int qm_spi_restore_context(const qm_spi_t spi,
1166  const qm_spi_context_t *const ctx)
1167 {
1168  (void)spi;
1169  (void)ctx;
1170 
1171  return 0;
1172 }
1173 #endif /* ENABLE_RESTORE_CONTEXT */
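A brief usage sketch of the context save/restore pair (not from the original file): preserve the controller registers before a low-power state and restore them on wake-up. The wrapper names are assumptions.

static qm_spi_context_t spi_ctx;

static void example_spi_save(void)
{
	/* Call before the SPI block loses power. */
	qm_spi_save_context(QM_SPI_MST_0, &spi_ctx);
}

static void example_spi_restore(void)
{
	/* Call after wake-up, before the next transfer. */
	qm_spi_restore_context(QM_SPI_MST_0, &spi_ctx);
}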
Definition: qm_spi.h:61