Intel® Quark™ Microcontroller Software Interface  1.4.0
Intel® Quark™ Microcontroller BSP
qm_ss_spi.c
1 /*
2  * {% copyright %}
3  */
4 
5 #include "qm_ss_spi.h"
6 
#define FIFO_RX_W_MARK (6) /* Interrupt mark to read RX FIFO */
#define FIFO_TX_W_MARK (3) /* Interrupt mark to write TX FIFO */

/*
 * Number of bytes needed to hold one data frame, derived from the DFS
 * (Data Frame Size) field of the control register: DFS values 0-7
 * (1-8 bit frames) need 1 byte, 8-15 (9-16 bit frames) need 2 bytes.
 *
 * NOTE: the parameter is parenthesized so the macro expands correctly
 * when passed a compound expression.
 */
#define BYTES_PER_FRAME(reg_data)                                              \
	(((((reg_data)&QM_SS_SPI_CTRL_DFS_MASK) >> QM_SS_SPI_CTRL_DFS_OFFS) >> \
	  3) +                                                                 \
	 1)
14 
/* AUX register base address of each SPI controller. */
static uint32_t base[QM_SS_SPI_NUM] = {QM_SS_SPI_0_BASE, QM_SS_SPI_1_BASE};

/* Active asynchronous transfer descriptor for each controller. */
static const qm_ss_spi_async_transfer_t *spi_async_transfer[QM_SS_SPI_NUM];
/* Remaining frames to receive in the active async transfer. */
static uint32_t rx_c[QM_SS_SPI_NUM];
/* Remaining frames to transmit in the active async transfer. */
static uint32_t tx_c[QM_SS_SPI_NUM];

/* Frame written to the TX FIFO for RX-only transfers. */
static const uint16_t dummy_frame = 0;
22 
23 /* Private Functions */
/*
 * Quiesce an SPI controller: clear its enable bit, mask every SPI
 * interrupt source and clear any pending interrupts.
 */
static void spi_disable(const qm_ss_spi_t spi)
{
	/* Disable SPI device */
	QM_SS_REG_AUX_NAND(base[spi] + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_EN);
	/* MASK all interrupts. */
	QM_SS_SPI_INTERRUPT_MASK_WRITE(0, base[spi]);
	/* Clear all interrupts */
	QM_SS_SPI_INTERRUPT_CLEAR_WRITE(QM_SS_SPI_INTR_ALL, base[spi]);
}
33 
34 static __inline__ void fifo_write(const qm_ss_spi_t spi, const void *data,
35  uint8_t size)
36 {
37  uint32_t dr;
38 
39  if (size == 1) {
40  dr = *(uint8_t *)data;
41  } else if (size == 2) {
42  dr = *(uint16_t *)data;
43  } else {
44  dr = *(uint32_t *)data;
45  }
46  dr |= QM_SS_SPI_DR_W_MASK;
47 
48  __builtin_arc_sr(dr, base[spi] + QM_SS_SPI_DR);
49 }
50 
51 static __inline__ void fifo_read(const qm_ss_spi_t spi, void *data,
52  uint8_t size)
53 {
54  QM_SS_SPI_DUMMY_WRITE(base[spi]);
55  if (size == 1) {
56  *(uint8_t *)data = __builtin_arc_lr(base[spi] + QM_SS_SPI_DR);
57  } else if (size == 2) {
58  *(uint16_t *)data = __builtin_arc_lr(base[spi] + QM_SS_SPI_DR);
59  } else {
60  *(uint32_t *)data = __builtin_arc_lr(base[spi] + QM_SS_SPI_DR);
61  }
62 }
63 
64 /* Public Functions */
66  const qm_ss_spi_config_t *const cfg)
67 {
68  QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
69  QM_CHECK(cfg, -EINVAL);
70 
71  uint32_t ctrl = 0;
72 
73  /* Configuration can be changed only when SPI is disabled */
74  if (0 != (__builtin_arc_lr(base[spi] + QM_SS_SPI_SPIEN) &
75  QM_SS_SPI_SPIEN_EN)) {
76  return -EBUSY;
77  }
78 
79  /* Enable clock to peripheral to allow register writes */
80  QM_SS_SPI_ENABLE_REG_WRITES(base[spi]);
81 
82  ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
83  ctrl &= ~(QM_SS_SPI_CTRL_DFS_MASK | QM_SS_SPI_CTRL_TMOD_MASK |
84  QM_SS_SPI_CTRL_BMOD_MASK);
85  ctrl |= cfg->frame_size << QM_SS_SPI_CTRL_DFS_OFFS;
86  ctrl |= cfg->transfer_mode << QM_SS_SPI_CTRL_TMOD_OFFS;
87  ctrl |= cfg->bus_mode << QM_SS_SPI_CTRL_BMOD_OFFS;
88 
89  QM_SS_SPI_CTRL_WRITE(ctrl, base[spi]);
90 
91  QM_SS_SPI_BAUD_RATE_WRITE(cfg->clk_divider, base[spi]);
92 
93  return 0;
94 }
95 
97  const qm_ss_spi_slave_select_t ss)
98 {
99  QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
100 
101  /* Check if the device reports as busy. */
102  if (__builtin_arc_lr(base[spi] + QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY) {
103  return -EBUSY;
104  }
105 
106  QM_SS_SPI_SER_WRITE(ss, base[spi]);
107 
108  return 0;
109 }
110 
112  qm_ss_spi_status_t *const status)
113 {
114  QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
115  QM_CHECK(status, -EINVAL);
116 
117  if (__builtin_arc_lr(base[spi] + QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY) {
118  *status = QM_SS_SPI_BUSY;
119  } else {
120  *status = QM_SS_SPI_IDLE;
121  }
122 
123  return 0;
124 }
125 
127  const qm_ss_spi_transfer_t *const xfer,
128  qm_ss_spi_status_t *const status)
129 {
130  QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
131  QM_CHECK(xfer, -EINVAL);
132 
133  uint32_t ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
134  uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
135  QM_SS_SPI_CTRL_TMOD_OFFS);
136 
137  QM_CHECK(tmode == QM_SS_SPI_TMOD_TX_RX ? (xfer->tx_len == xfer->rx_len)
138  : 1,
139  -EINVAL);
140  QM_CHECK(tmode == QM_SS_SPI_TMOD_TX ? (xfer->rx_len == 0) : 1, -EINVAL);
141  QM_CHECK(tmode == QM_SS_SPI_TMOD_EEPROM_READ ? (xfer->rx_len > 0) : 1,
142  -EINVAL);
143  QM_CHECK(tmode == QM_SS_SPI_TMOD_RX ? (xfer->rx_len > 0) : 1, -EINVAL);
144  QM_CHECK(tmode == QM_SS_SPI_TMOD_RX ? (xfer->tx_len == 0) : 1, -EINVAL);
145 
146  uint32_t tx_cnt = xfer->tx_len;
147  uint32_t rx_cnt = xfer->rx_len;
148  uint8_t *rx_buffer = xfer->rx;
149  uint8_t *tx_buffer = xfer->tx;
150  int ret = 0;
151  uint32_t sr = 0;
152  /* Calculate number of bytes per frame */
153  uint8_t bytes = BYTES_PER_FRAME(ctrl);
154 
155  /* Disable all SPI interrupts */
156  QM_SS_SPI_INTERRUPT_MASK_WRITE(0, base[spi]);
157 
158  /* Set NDF (Number of Data Frames) in RX or EEPROM Read mode. (-1) */
159  if (tmode == QM_SS_SPI_TMOD_RX || tmode == QM_SS_SPI_TMOD_EEPROM_READ) {
160  QM_SS_SPI_NDF_WRITE((xfer->rx_len - 1), base[spi]);
161  }
162 
163  /* RX only transfers need a dummy frame to be sent. */
164  if (tmode == QM_SS_SPI_TMOD_RX) {
165  tx_buffer = (uint8_t *)&dummy_frame;
166  tx_cnt = 1;
167  }
168 
169  /* Enable SPI device */
170  QM_SS_REG_AUX_OR(base[spi] + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_EN);
171 
172  while (tx_cnt || rx_cnt) {
173  sr = __builtin_arc_lr(base[spi] + QM_SS_SPI_SR);
174  /* Break and report error if RX FIFO has overflown */
175  if (QM_SS_SPI_INTERRUPT_STATUS_READ(base[spi]) &
176  QM_SS_SPI_INTR_RXOI) {
177  ret = -EIO;
178  if (status) {
179  *status |= QM_SS_SPI_RX_OVERFLOW;
180  }
181  break;
182  }
183  /* Copy data to buffer as long RX-FIFO is not empty */
184  if (sr & QM_SS_SPI_SR_RFNE && rx_cnt) {
185  fifo_read(spi, rx_buffer, bytes);
186  rx_buffer += bytes;
187  rx_cnt--;
188  }
189  /* Copy data from buffer as long TX-FIFO is not full. */
190  if (sr & QM_SS_SPI_SR_TFNF && tx_cnt) {
191  fifo_write(spi, tx_buffer, bytes);
192  tx_buffer += bytes;
193  tx_cnt--;
194  }
195  }
196  /* Wait for last byte transferred */
197  while (__builtin_arc_lr(base[spi] + QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY)
198  ;
199 
200  spi_disable(spi);
201  return ret;
202 }
203 
204 /* Interrupt related functions. */
205 
207  const qm_ss_spi_async_transfer_t *const xfer)
208 {
209  QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
210  QM_CHECK(xfer, -EINVAL);
211 
212  /* Load and save initial control register */
213  uint32_t ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
214  uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
215  QM_SS_SPI_CTRL_TMOD_OFFS);
216  uint8_t bytes = BYTES_PER_FRAME(ctrl);
217 
218  QM_CHECK(tmode == QM_SS_SPI_TMOD_TX_RX ? (xfer->tx_len == xfer->rx_len)
219  : 1,
220  -EINVAL);
221 
222  uint32_t rftlr = 0;
223  uint32_t tftlr = 0;
224 
225  spi_async_transfer[spi] = xfer;
226  tx_c[spi] = xfer->tx_len;
227  rx_c[spi] = xfer->rx_len;
228 
229  /* Set NDF (Number of Data Frames) in RX or EEPROM Read mode. (-1) */
230  if (tmode == QM_SS_SPI_TMOD_RX || tmode == QM_SS_SPI_TMOD_EEPROM_READ) {
231  QM_SS_SPI_NDF_WRITE((xfer->rx_len - 1), base[spi]);
232  }
233 
234  rftlr =
235  (((FIFO_RX_W_MARK < xfer->rx_len ? FIFO_RX_W_MARK : xfer->rx_len) -
236  1));
237  tftlr = FIFO_TX_W_MARK;
238 
239  /* Set FIFO threshold levels */
240  QM_SS_SPI_RFTLR_WRITE(rftlr, base[spi]);
241  QM_SS_SPI_TFTLR_WRITE(tftlr, base[spi]);
242 
243  /* Unmask all interrupts */
244  QM_SS_SPI_INTERRUPT_MASK_WRITE(QM_SS_SPI_INTR_ALL, base[spi]);
245 
246  /* Enable SPI device */
247  QM_SS_REG_AUX_OR(base[spi] + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_EN);
248 
249  /* RX only transfers need a dummy frame byte to be sent. */
250  if (tmode == QM_SS_SPI_TMOD_RX) {
251  fifo_write(spi, (uint8_t *)&dummy_frame, bytes);
252  }
253 
254  return 0;
255 }
256 
258 {
259  QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
260  const qm_ss_spi_async_transfer_t *const transfer =
261  spi_async_transfer[spi];
262  uint32_t len = 0;
263  uint32_t ctrl = 0;
264  uint8_t tmode = 0;
265 
266  spi_disable(spi);
267 
268  if (transfer->callback) {
269  ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
270  tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
271  QM_SS_SPI_CTRL_TMOD_OFFS);
272  if (tmode == QM_SS_SPI_TMOD_TX ||
273  tmode == QM_SS_SPI_TMOD_TX_RX) {
274  len = transfer->tx_len - tx_c[spi];
275  } else {
276  len = transfer->rx_len - rx_c[spi];
277  }
278 
279  /*
280  * NOTE: change this to return controller-specific code
281  * 'user aborted'.
282  */
283  transfer->callback(transfer->callback_data, -ECANCELED,
284  QM_SS_SPI_IDLE, (uint16_t)len);
285  }
286 
287  return 0;
288 }
289 
290 static void handle_spi_err_interrupt(const qm_ss_spi_t spi)
291 {
292  uint32_t intr_stat = QM_SS_SPI_INTERRUPT_STATUS_READ(base[spi]);
293  const qm_ss_spi_async_transfer_t *const transfer =
294  spi_async_transfer[spi];
295 
296  spi_disable(spi);
297 
298 #if HAS_SS_SPI_VERBOSE_ERROR
299  if ((intr_stat & QM_SS_SPI_INTR_TXOI) && transfer->callback) {
300  transfer->callback(transfer->callback_data, -EIO,
302  transfer->tx_len - tx_c[spi]);
303  }
304 
305  if ((intr_stat & QM_SS_SPI_INTR_RXUI) && transfer->callback) {
306  transfer->callback(transfer->callback_data, -EIO,
308  transfer->rx_len - rx_c[spi]);
309  }
310 #else /* HAS_SS_SPI_VERBOSE_ERROR */
311  QM_ASSERT((intr_stat & QM_SS_SPI_INTR_STAT_TXOI) == 0);
312  QM_ASSERT((intr_stat & QM_SS_SPI_INTR_STAT_RXUI) == 0);
313 #endif /* HAS_SS_SPI_VERBOSE_ERROR */
314 
315  if ((intr_stat & QM_SS_SPI_INTR_RXOI) && transfer->callback) {
316  transfer->callback(transfer->callback_data, -EIO,
318  transfer->rx_len - rx_c[spi]);
319  }
320 }
321 
322 static void handle_spi_tx_interrupt(const qm_ss_spi_t spi)
323 {
324  uint32_t ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
325  /* Calculate number of bytes per frame */
326  uint8_t bytes = BYTES_PER_FRAME(ctrl);
327  uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
328  QM_SS_SPI_CTRL_TMOD_OFFS);
329  uint32_t rxflr = 0;
330  uint32_t txflr = 0;
331  int32_t cnt = 0;
332  const qm_ss_spi_async_transfer_t *const transfer =
333  spi_async_transfer[spi];
334 
335  /* Clear Transmit Fifo Emtpy interrupt */
336  QM_SS_SPI_INTERRUPT_CLEAR_WRITE(QM_SS_SPI_INTR_TXEI, base[spi]);
337 
338  /* Jump to the right position of TX buffer.
339  * If no bytes were transmitted before, we start from the beginning,
340  * otherwise we jump to the next frame to be sent.
341  */
342  const uint8_t *tx_buffer =
343  transfer->tx + ((transfer->tx_len - tx_c[spi]) * bytes);
344 
345  if (tx_c[spi] == 0 &&
346  !(__builtin_arc_lr(base[spi] + QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY)) {
347  if (tmode == QM_SS_SPI_TMOD_TX) {
348  spi_disable(spi);
349  if (transfer->callback) {
350  transfer->callback(transfer->callback_data, 0,
352  transfer->tx_len);
353  }
354  } else {
355  QM_SS_SPI_INTERRUPT_MASK_NAND(QM_SS_SPI_INTR_TXEI,
356  base[spi]);
357  }
358  return;
359  }
360  /* Make sure RX fifo does not overflow */
361  rxflr = __builtin_arc_lr(base[spi] + QM_SS_SPI_RXFLR);
362  txflr = __builtin_arc_lr(base[spi] + QM_SS_SPI_TXFLR);
363  cnt = QM_SS_SPI_FIFO_DEPTH - rxflr - txflr - 1;
364  while (tx_c[spi] && cnt > 0) {
365  fifo_write(spi, tx_buffer, bytes);
366  tx_buffer += bytes;
367  tx_c[spi]--;
368  cnt--;
369  }
370 }
371 
/*
 * RX ISR handler: drains the RX FIFO into the transfer buffer, then
 * either lowers the RX threshold for the remaining frames or, once all
 * frames have arrived, disables the controller and invokes the
 * completion callback.
 */
static void handle_spi_rx_interrupt(const qm_ss_spi_t spi)
{
	uint32_t ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
	/* Calculate number of bytes per frame */
	uint8_t bytes = BYTES_PER_FRAME(ctrl);
	const qm_ss_spi_async_transfer_t *const transfer =
	    spi_async_transfer[spi];
	uint32_t new_irq_level = 0;

	/* Clear RX-FIFO FULL interrupt */
	QM_SS_SPI_INTERRUPT_CLEAR_WRITE(QM_SS_SPI_INTR_RXFI, base[spi]);

	/*
	 * Jump to the right position of RX buffer.
	 * If no bytes were received before, we start from the beginning,
	 * otherwise we jump to the next available frame position.
	 */
	uint8_t *rx_buffer =
	    transfer->rx + ((transfer->rx_len - rx_c[spi]) * bytes);

	/* Drain while the FIFO has data and frames are still expected. */
	while (__builtin_arc_lr(base[spi] + QM_SS_SPI_SR) & QM_SS_SPI_SR_RFNE &&
	       rx_c[spi]) {
		fifo_read(spi, rx_buffer, bytes);
		rx_buffer += bytes;
		rx_c[spi]--;
	}
	/* Set new FIFO threshold or complete transfer */
	new_irq_level =
	    (FIFO_RX_W_MARK < rx_c[spi] ? FIFO_RX_W_MARK : rx_c[spi]);
	if (rx_c[spi]) {
		/* Threshold register holds (level - 1). */
		new_irq_level--;
		QM_SS_SPI_RFTLR_WRITE(new_irq_level, base[spi]);
	} else {
		spi_disable(spi);
		if (transfer->callback) {
			transfer->callback(transfer->callback_data, 0,
					   QM_SS_SPI_IDLE, transfer->rx_len);
		}
	}
}
412 
/* ISR for SPI 0 error interrupt. */
QM_ISR_DECLARE(qm_ss_spi_0_error_isr)
{
	handle_spi_err_interrupt(QM_SS_SPI_0);
}
/* ISR for SPI 1 error interrupt. */
QM_ISR_DECLARE(qm_ss_spi_1_error_isr)
{
	handle_spi_err_interrupt(QM_SS_SPI_1);
}
/* ISR for SPI 0 RX data available interrupt. */
QM_ISR_DECLARE(qm_ss_spi_0_rx_avail_isr)
{
	handle_spi_rx_interrupt(QM_SS_SPI_0);
}
/* ISR for SPI 1 RX data available interrupt. */
QM_ISR_DECLARE(qm_ss_spi_1_rx_avail_isr)
{
	handle_spi_rx_interrupt(QM_SS_SPI_1);
}
/* ISR for SPI 0 TX data request interrupt. */
QM_ISR_DECLARE(qm_ss_spi_0_tx_req_isr)
{
	handle_spi_tx_interrupt(QM_SS_SPI_0);
}
/* ISR for SPI 1 TX data request interrupt. */
QM_ISR_DECLARE(qm_ss_spi_1_tx_req_isr)
{
	handle_spi_tx_interrupt(QM_SS_SPI_1);
}
437 
438 #if (ENABLE_RESTORE_CONTEXT)
440  qm_ss_spi_context_t *const ctx)
441 {
442  const uint32_t controller = base[spi];
443 
444  QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
445  QM_CHECK(ctx != NULL, -EINVAL);
446 
447  ctx->spi_timing = __builtin_arc_lr(controller + QM_SS_SPI_TIMING);
448  ctx->spi_spien = __builtin_arc_lr(controller + QM_SS_SPI_SPIEN);
449  ctx->spi_ctrl = __builtin_arc_lr(controller + QM_SS_SPI_CTRL);
450 
451  return 0;
452 }
453 
455  const qm_ss_spi_context_t *const ctx)
456 {
457  const uint32_t controller = base[spi];
458 
459  QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
460  QM_CHECK(ctx != NULL, -EINVAL);
461 
462  __builtin_arc_sr(ctx->spi_timing, controller + QM_SS_SPI_TIMING);
463  __builtin_arc_sr(ctx->spi_spien, controller + QM_SS_SPI_SPIEN);
464  __builtin_arc_sr(ctx->spi_ctrl, controller + QM_SS_SPI_CTRL);
465 
466  return 0;
467 }
468 #else
470  qm_ss_spi_context_t *const ctx)
471 {
472  (void)spi;
473  (void)ctx;
474 
475  return 0;
476 }
477 
479  const qm_ss_spi_context_t *const ctx)
480 {
481  (void)spi;
482  (void)ctx;
483 
484  return 0;
485 }
486 #endif /* ENABLE_RESTORE_CONTEXT */
qm_ss_spi_slave_select_t
SPI Slave select type.
Definition: qm_ss_spi.h:107
void * tx
Write data.
Definition: qm_ss_spi.h:154
SPI status register.
Transmit & Receive mode.
Definition: qm_ss_spi.h:64
SPI configuration type.
Definition: qm_ss_spi.h:132
uint32_t spi_timing
Timing Register.
int qm_ss_spi_irq_transfer_terminate(const qm_ss_spi_t spi)
Terminate SPI IRQ transfer.
Definition: qm_ss_spi.c:257
SPI module 0.
int qm_ss_spi_irq_transfer(const qm_ss_spi_t spi, const qm_ss_spi_async_transfer_t *const xfer)
Initiate an interrupt based SPI transfer.
Definition: qm_ss_spi.c:206
uint16_t tx_len
Number of data frames to write.
Definition: qm_ss_spi.h:156
Sensor Subsystem SPI context type.
TX transfer has overflown.
Definition: qm_ss_spi.h:123
uint16_t tx_len
Number of data frames to write.
Definition: qm_ss_spi.h:188
SPI asynchronous transfer type.
Definition: qm_ss_spi.h:153
int qm_ss_spi_set_config(const qm_ss_spi_t spi, const qm_ss_spi_config_t *const cfg)
Set SPI configuration.
Definition: qm_ss_spi.c:65
int qm_ss_spi_get_status(const qm_ss_spi_t spi, qm_ss_spi_status_t *const status)
Get SPI bus status.
Definition: qm_ss_spi.c:111
int qm_ss_spi_slave_select(const qm_ss_spi_t spi, const qm_ss_spi_slave_select_t ss)
Set Slave Select lines.
Definition: qm_ss_spi.c:96
EEPROM-Read Mode.
Definition: qm_ss_spi.h:89
Receive-Only mode.
Definition: qm_ss_spi.h:80
int qm_ss_spi_transfer(const qm_ss_spi_t spi, const qm_ss_spi_transfer_t *const xfer, qm_ss_spi_status_t *const status)
Perform a blocking SPI transfer.
Definition: qm_ss_spi.c:126
RX transfer has underflown.
Definition: qm_ss_spi.h:124
SPI device is busy.
Definition: qm_ss_spi.h:120
void(* callback)(void *data, int error, qm_ss_spi_status_t status, uint16_t len)
Transfer callback.
Definition: qm_ss_spi.h:171
RW buffer for FIFOs.
qm_ss_spi_tmode_t transfer_mode
Transfer mode (enum)
Definition: qm_ss_spi.h:134
void * tx
Write data.
Definition: qm_ss_spi.h:186
SPI serial clock divider value.
Number of valid data entries in TX FIFO.
SPI module 1.
SPI device is not in use.
Definition: qm_ss_spi.h:119
SPI enable register.
int qm_ss_spi_save_context(const qm_ss_spi_t spi, qm_ss_spi_context_t *const ctx)
Save SS SPI context.
Definition: qm_ss_spi.c:439
Transmit-Only mode.
Definition: qm_ss_spi.h:72
qm_ss_spi_frame_size_t frame_size
Frame Size.
Definition: qm_ss_spi.h:133
uint16_t clk_divider
SCK = SPI_clock/clk_divider.
Definition: qm_ss_spi.h:141
int qm_ss_spi_restore_context(const qm_ss_spi_t spi, const qm_ss_spi_context_t *const ctx)
Restore SS SPI context.
Definition: qm_ss_spi.c:454
uint16_t rx_len
Number of data frames to read.
Definition: qm_ss_spi.h:157
SPI control register.
void * rx
Read data.
Definition: qm_ss_spi.h:155
void * rx
Read data.
Definition: qm_ss_spi.h:187
uint32_t spi_ctrl
Control Register.
uint32_t spi_spien
SPI Enable Register.
qm_ss_spi_t
Sensor Subsystem SPI modules.
void * callback_data
Callback user data.
Definition: qm_ss_spi.h:173
Number of valid data entries in RX FIFO.
QM_ISR_DECLARE(qm_ss_spi_0_error_isr)
ISR for SPI 0 error interrupt.
Definition: qm_ss_spi.c:413
qm_ss_spi_status_t
SPI status.
Definition: qm_ss_spi.h:118
SPI synchronous transfer type.
Definition: qm_ss_spi.h:185
RX transfer has overflown.
Definition: qm_ss_spi.h:121
uint16_t rx_len
Number of data frames to read.
Definition: qm_ss_spi.h:189
qm_ss_spi_bmode_t bus_mode
Bus mode (enum)
Definition: qm_ss_spi.h:135