Intel® Quark™ Microcontroller Software Interface 1.4.0
Intel® Quark™ Microcontroller BSP
qm_dma.c
1 /*
2  * {% copyright %}
3  */
4 
5 #include "clk.h"
6 #include "dma.h"
7 
8 #ifndef UNIT_TEST
9 qm_dma_reg_t *qm_dma[QM_DMA_NUM] = {(qm_dma_reg_t *)QM_DMA_BASE};
10 #endif
11 
12 /* DMA driver private data structures */
13 dma_cfg_prv_t dma_channel_config[QM_DMA_NUM][QM_DMA_CHANNEL_NUM] = {{{0}}};
14 
15 /*
16  * Transfer interrupt handler.
17  * - Single block: TFR triggers a user callback invocation.
18  * - Multiblock (contiguous): TFR triggers a user callback invocation; block
19  *   interrupts are silent.
20  * - Multiblock (linked list): the last block interrupt of each buffer
21  *   triggers a user callback invocation; TFR is silent.
22  */
23 static void qm_dma_isr_handler(const qm_dma_t dma,
24  const qm_dma_channel_id_t channel_id)
25 {
26  dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id];
27  volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;
28  volatile qm_dma_chan_reg_t *chan_reg =
29  &QM_DMA[dma]->chan_reg[channel_id];
30  uint32_t transfer_length =
31  get_transfer_length(dma, channel_id, prv_cfg);
32 
33  /* The status can't be asserted here as there is a possible race
34  * condition when terminating channels. It's possible that an interrupt
35  * can be generated before the terminate function masks the
36  * interrupts. */
37 
38  if (int_reg->status_int_low & QM_DMA_INT_STATUS_TFR) {
39 
40  QM_ASSERT(int_reg->status_tfr_low & BIT(channel_id));
41 
42  /* Transfer completed, clear interrupt */
43  int_reg->clear_tfr_low = BIT(channel_id);
44 
45  /* If multiblock, the final block is also completed. */
46  int_reg->clear_block_low = BIT(channel_id);
47 
48  /* Mask interrupts for this channel */
49  int_reg->mask_block_low = BIT(channel_id) << 8;
50  int_reg->mask_tfr_low = BIT(channel_id) << 8;
51  int_reg->mask_err_low = BIT(channel_id) << 8;
52 
53  /* Clear llp register */
54  chan_reg->llp_low = 0;
55 
56  /*
57  * Call the callback if registered and pass the transfer length.
58  */
59  if (prv_cfg->client_callback) {
60  /* Single block or contiguous multiblock. */
61  prv_cfg->client_callback(prv_cfg->callback_context,
62  transfer_length, 0);
63  }
64  } else if (int_reg->status_int_low & QM_DMA_INT_STATUS_BLOCK) {
65  /* Block interrupts are only unmasked in multiblock mode. */
66  QM_ASSERT(int_reg->status_block_low & BIT(channel_id));
67 
68  /* Block completed, clear interrupt. */
69  int_reg->clear_block_low = BIT(channel_id);
70 
71  prv_cfg->num_blocks_int_pending--;
72 
73  if (NULL != prv_cfg->lli_tail &&
74  0 == prv_cfg->num_blocks_int_pending) {
75  /*
76  * Linked list mode: invoke the callback if this is the
77  * last block of the buffer.
78  */
79  if (prv_cfg->client_callback) {
80  prv_cfg->client_callback(
81  prv_cfg->callback_context, transfer_length,
82  0);
83  }
84 
85  /* Buffer done, set for next buffer. */
86  prv_cfg->num_blocks_int_pending =
87  prv_cfg->num_blocks_per_buffer;
88 
89  } else if (NULL == prv_cfg->lli_tail) {
90  QM_ASSERT(prv_cfg->num_blocks_int_pending <
91  prv_cfg->num_blocks_per_buffer);
92  if (1 == prv_cfg->num_blocks_int_pending) {
93  /*
94  * Contiguous mode. We have just processed the
95  * next-to-last block; clear CFG.RELOAD so
96  * that the next block is the last one to be
97  * transferred.
98  */
99  chan_reg->cfg_low &=
100  ~QM_DMA_CFG_L_RELOAD_SRC_MASK;
101  chan_reg->cfg_low &=
102  ~QM_DMA_CFG_L_RELOAD_DST_MASK;
103  }
104  }
105  }
106 }
107 
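/*
 * Illustrative sketch, not part of the original file: a client callback with
 * the signature the driver expects (see qm_dma_channel_config_t). The
 * handlers above pass the length reported by get_transfer_length() and an
 * error code of 0 on success; the error handler passes a length of 0 and
 * -EIO.
 */
static void example_dma_callback(void *callback_context, uint32_t len,
				 int error_code)
{
	(void)callback_context;

	if (error_code) {
		/* DMA error reported by qm_dma_isr_err_handler(). */
		return;
	}
	/* Transfer (or buffer, in linked-list mode) of 'len' items is done. */
	(void)len;
}
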
108 /*
109  * Error interrupt handler.
110  */
111 static void qm_dma_isr_err_handler(const qm_dma_t dma)
112 {
113  uint32_t interrupt_channel_mask;
114  dma_cfg_prv_t *chan_cfg;
115  qm_dma_channel_id_t channel_id = 0;
116  volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;
117 
118  QM_ASSERT(int_reg->status_int_low & QM_DMA_INT_STATUS_ERR);
119  QM_ASSERT(int_reg->status_err_low);
120 
121  interrupt_channel_mask = int_reg->status_err_low;
122  while (interrupt_channel_mask) {
123 
124  /* Find the channel that the interrupt is for */
125  if (!(interrupt_channel_mask & 0x1)) {
126  interrupt_channel_mask >>= 1;
127  channel_id++;
128  continue;
129  }
130 
131  /* Clear the error interrupt for this channel */
132  int_reg->clear_err_low = BIT(channel_id);
133 
134  /* Mask interrupts for this channel */
135  int_reg->mask_block_low = BIT(channel_id) << 8;
136  int_reg->mask_tfr_low = BIT(channel_id) << 8;
137  int_reg->mask_err_low = BIT(channel_id) << 8;
138 
139  /* Call the callback if registered and pass the
140  * transfer error code */
141  chan_cfg = &dma_channel_config[dma][channel_id];
142  if (chan_cfg->client_callback) {
143  chan_cfg->client_callback(chan_cfg->callback_context, 0,
144  -EIO);
145  }
146 
147  interrupt_channel_mask >>= 1;
148  channel_id++;
149  }
150 }
151 
152 QM_ISR_DECLARE(qm_dma_0_error_isr)
153 {
154  qm_dma_isr_err_handler(QM_DMA_0);
155  QM_ISR_EOI(QM_IRQ_DMA_0_ERROR_INT_VECTOR);
156 }
157 
158 QM_ISR_DECLARE(qm_dma_0_isr_0)
159 {
160  qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_0);
161  QM_ISR_EOI(QM_IRQ_DMA_0_INT_0_VECTOR);
162 }
163 
164 QM_ISR_DECLARE(qm_dma_0_isr_1)
165 {
166  qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_1);
167  QM_ISR_EOI(QM_IRQ_DMA_0_INT_1_VECTOR);
168 }
169 
170 #if (QUARK_SE)
171 QM_ISR_DECLARE(qm_dma_0_isr_2)
172 {
173  qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_2);
174  QM_ISR_EOI(QM_IRQ_DMA_0_INT_2_VECTOR);
175 }
176 
177 QM_ISR_DECLARE(qm_dma_0_isr_3)
178 {
179  qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_3);
180  QM_ISR_EOI(QM_IRQ_DMA_0_INT_3_VECTOR);
181 }
182 
183 QM_ISR_DECLARE(qm_dma_0_isr_4)
184 {
185  qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_4);
186  QM_ISR_EOI(QM_IRQ_DMA_0_INT_4_VECTOR);
187 }
188 
189 QM_ISR_DECLARE(qm_dma_0_isr_5)
190 {
191  qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_5);
192  QM_ISR_EOI(QM_IRQ_DMA_0_INT_5_VECTOR);
193 }
194 
195 QM_ISR_DECLARE(qm_dma_0_isr_6)
196 {
197  qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_6);
198  QM_ISR_EOI(QM_IRQ_DMA_0_INT_6_VECTOR);
199 }
200 
201 QM_ISR_DECLARE(qm_dma_0_isr_7)
202 {
203  qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_7);
204  QM_ISR_EOI(QM_IRQ_DMA_0_INT_7_VECTOR);
205 }
206 #endif /* QUARK_SE */
207 
208 int qm_dma_init(const qm_dma_t dma)
209 {
210  QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
211 
212  qm_dma_channel_id_t channel_id;
213  volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;
214  int return_code;
215 
216  /* Enable the DMA Clock */
217  clk_dma_enable();
218 
219  /* Disable the controller */
220  return_code = dma_controller_disable(dma);
221  if (return_code) {
222  return return_code;
223  }
224 
225  /* Disable the channels and interrupts */
226  for (channel_id = 0; channel_id < QM_DMA_CHANNEL_NUM; channel_id++) {
227  return_code = dma_channel_disable(dma, channel_id);
228  if (return_code) {
229  return return_code;
230  }
231  dma_interrupt_disable(dma, channel_id);
232  }
233 
234  /* Mask all interrupts */
235  int_reg->mask_tfr_low = CHANNEL_MASK_ALL << 8;
236  int_reg->mask_block_low = CHANNEL_MASK_ALL << 8;
237  int_reg->mask_src_trans_low = CHANNEL_MASK_ALL << 8;
238  int_reg->mask_dst_trans_low = CHANNEL_MASK_ALL << 8;
239  int_reg->mask_err_low = CHANNEL_MASK_ALL << 8;
240 
241  /* Clear all interrupts */
242  int_reg->clear_tfr_low = CHANNEL_MASK_ALL;
243  int_reg->clear_block_low = CHANNEL_MASK_ALL;
244  int_reg->clear_src_trans_low = CHANNEL_MASK_ALL;
245  int_reg->clear_dst_trans_low = CHANNEL_MASK_ALL;
246  int_reg->clear_err_low = CHANNEL_MASK_ALL;
247 
248  /* Enable the controller */
249  dma_controller_enable(dma);
250 
251  return 0;
252 }
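
/*
 * Illustrative sketch, not part of the original file: the controller must be
 * initialised once before any channel or transfer configuration.
 */
static int example_dma_bringup(void)
{
	int rc = qm_dma_init(QM_DMA_0);

	if (rc) {
		return rc;
	}
	/* Channel and transfer configuration can follow, as sketched below. */
	return 0;
}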
253 
254 int qm_dma_channel_set_config(const qm_dma_t dma,
255  const qm_dma_channel_id_t channel_id,
256  qm_dma_channel_config_t *const channel_config)
257 {
258  QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
259  QM_CHECK(channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);
260  QM_CHECK(channel_config != NULL, -EINVAL);
261 
262  dma_cfg_prv_t *chan_cfg = &dma_channel_config[dma][channel_id];
263  int return_code;
264 
265  /* Set the transfer type. */
266  return_code = dma_set_transfer_type(dma, channel_id,
267  channel_config->transfer_type,
268  channel_config->channel_direction);
269  if (return_code) {
270  return return_code;
271  }
272 
273  /* Set the source and destination transfer width. */
274  dma_set_source_transfer_width(dma, channel_id,
275  channel_config->source_transfer_width);
276  dma_set_destination_transfer_width(
277  dma, channel_id, channel_config->destination_transfer_width);
278 
279  /* Set the source and destination burst transfer length. */
280  dma_set_source_burst_length(dma, channel_id,
281  channel_config->source_burst_length);
282  dma_set_destination_burst_length(
283  dma, channel_id, channel_config->destination_burst_length);
284 
285  /* Set channel direction */
286  dma_set_transfer_direction(dma, channel_id,
287  channel_config->channel_direction);
288 
289  /* Set the increment type depending on direction */
290  switch (channel_config->channel_direction) {
291  case QM_DMA_PERIPHERAL_TO_MEMORY:
292  dma_set_source_increment(dma, channel_id,
293  QM_DMA_ADDRESS_NO_CHANGE);
294  dma_set_destination_increment(dma, channel_id,
295  QM_DMA_ADDRESS_INCREMENT);
296  break;
297  case QM_DMA_MEMORY_TO_PERIPHERAL:
298  dma_set_source_increment(dma, channel_id,
299  QM_DMA_ADDRESS_INCREMENT);
300  dma_set_destination_increment(dma, channel_id,
301  QM_DMA_ADDRESS_NO_CHANGE);
302  break;
303  case QM_DMA_MEMORY_TO_MEMORY:
304  dma_set_source_increment(dma, channel_id,
305  QM_DMA_ADDRESS_INCREMENT);
306  dma_set_destination_increment(dma, channel_id,
307  QM_DMA_ADDRESS_INCREMENT);
308  break;
309  }
310 
311  if (channel_config->channel_direction != QM_DMA_MEMORY_TO_MEMORY) {
312  /* Set the handshake interface. */
313  dma_set_handshake_interface(
314  dma, channel_id, channel_config->handshake_interface);
315 
316  /* Set the handshake type. This is hardcoded to hardware handshaking. */
317  dma_set_handshake_type(dma, channel_id, 0);
318 
319  /* Set the handshake polarity. */
320  dma_set_handshake_polarity(dma, channel_id,
321  channel_config->handshake_polarity);
322  }
323 
324  /* Save the client's callback context */
325  chan_cfg->callback_context = channel_config->callback_context;
326 
327  /* Save the callback provided by DMA client */
328  chan_cfg->client_callback = channel_config->client_callback;
329 
330  /* Multiblock linked list not configured. */
331  chan_cfg->lli_tail = NULL;
332 
333  /* Number of blocks per buffer (>1 when multiblock). */
334  chan_cfg->num_blocks_per_buffer = 1;
335 
336  /* Multiblock circular linked list flag. */
337  chan_cfg->transfer_type_ll_circular =
338  (channel_config->transfer_type == QM_DMA_TYPE_MULTI_LL_CIRCULAR)
339  ? true
340  : false;
341 
342  return 0;
343 }
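
/*
 * Illustrative sketch, not part of the original file: configure channel 0 of
 * controller 0 for a single-block, memory-to-memory transfer. The enum value
 * names QM_DMA_TYPE_SINGLE, QM_DMA_TRANS_WIDTH_32 and
 * QM_DMA_BURST_TRANS_LENGTH_4 are assumed to match qm_dma.h of this release.
 * Handshake fields are left at 0, as they are ignored for memory-to-memory.
 */
static int example_channel_setup(void)
{
	qm_dma_channel_config_t cfg = {0};

	cfg.channel_direction = QM_DMA_MEMORY_TO_MEMORY;
	cfg.transfer_type = QM_DMA_TYPE_SINGLE;
	cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_32;
	cfg.destination_transfer_width = QM_DMA_TRANS_WIDTH_32;
	cfg.source_burst_length = QM_DMA_BURST_TRANS_LENGTH_4;
	cfg.destination_burst_length = QM_DMA_BURST_TRANS_LENGTH_4;
	cfg.client_callback = example_dma_callback; /* Sketched earlier. */
	cfg.callback_context = NULL;

	return qm_dma_channel_set_config(QM_DMA_0, QM_DMA_CHANNEL_0, &cfg);
}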
344 
345 int qm_dma_transfer_set_config(const qm_dma_t dma,
346  const qm_dma_channel_id_t channel_id,
347  qm_dma_transfer_t *const transfer_config)
348 {
349  QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
350  QM_CHECK(channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);
351  QM_CHECK(transfer_config != NULL, -EINVAL);
352  QM_CHECK(transfer_config->source_address != NULL, -EINVAL);
353  QM_CHECK(transfer_config->destination_address != NULL, -EINVAL);
354  QM_CHECK(transfer_config->block_size >= QM_DMA_CTL_H_BLOCK_TS_MIN,
355  -EINVAL);
356  QM_CHECK(transfer_config->block_size <= QM_DMA_CTL_H_BLOCK_TS_MAX,
357  -EINVAL);
358 
359  /* Set the source and destination addresses. */
360  dma_set_source_address(dma, channel_id,
361  (uint32_t)transfer_config->source_address);
362  dma_set_destination_address(
363  dma, channel_id, (uint32_t)transfer_config->destination_address);
364 
365  /* Set the block size for the transfer. */
366  dma_set_block_size(dma, channel_id, transfer_config->block_size);
367 
368  return 0;
369 }
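
/*
 * Illustrative sketch, not part of the original file: program a single-block
 * copy on an already configured channel and start it. block_size is counted
 * in data items of the configured source transfer width.
 */
static uint32_t example_src[64];
static uint32_t example_dst[64];

static int example_single_block_copy(void)
{
	qm_dma_transfer_t xfer = {0};
	int rc;

	xfer.source_address = example_src;
	xfer.destination_address = example_dst;
	xfer.block_size = 64;

	rc = qm_dma_transfer_set_config(QM_DMA_0, QM_DMA_CHANNEL_0, &xfer);
	if (rc) {
		return rc;
	}
	return qm_dma_transfer_start(QM_DMA_0, QM_DMA_CHANNEL_0);
}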
370 
371 /* Populate a linked list. */
372 static qm_dma_linked_list_item_t *
373 dma_linked_list_init(const qm_dma_multi_transfer_t *multi_transfer,
374  uint32_t ctrl_low, uint32_t tail_pointing_lli)
375 {
376  uint32_t source_address = (uint32_t)multi_transfer->source_address;
377  uint32_t destination_address =
378  (uint32_t)multi_transfer->destination_address;
379  /*
380  * Extracted source/destination increment type, used to calculate
381  * address increment between consecutive blocks.
382  */
383  qm_dma_address_increment_t source_address_inc_type =
384  (ctrl_low & QM_DMA_CTL_L_SINC_MASK) >> QM_DMA_CTL_L_SINC_OFFSET;
385  qm_dma_address_increment_t destination_address_inc_type =
386  (ctrl_low & QM_DMA_CTL_L_DINC_MASK) >> QM_DMA_CTL_L_DINC_OFFSET;
387  /* Linked list node iteration variable. */
388  qm_dma_linked_list_item_t *lli = multi_transfer->linked_list_first;
389  uint32_t source_inc = 0;
390  uint32_t destination_inc = 0;
391  uint32_t i;
392 
393  QM_ASSERT(source_address_inc_type == QM_DMA_ADDRESS_INCREMENT ||
394  source_address_inc_type == QM_DMA_ADDRESS_NO_CHANGE);
395  QM_ASSERT(destination_address_inc_type == QM_DMA_ADDRESS_INCREMENT ||
396  destination_address_inc_type == QM_DMA_ADDRESS_NO_CHANGE);
397 
398  /*
399  * Memory endpoints increment the source/destination address between
400  * consecutive LLIs by the block size times the transfer width in
401  * bytes.
402  */
403  if (source_address_inc_type == QM_DMA_ADDRESS_INCREMENT) {
404  source_inc = multi_transfer->block_size *
405  BIT((ctrl_low & QM_DMA_CTL_L_SRC_TR_WIDTH_MASK) >>
406  QM_DMA_CTL_L_SRC_TR_WIDTH_OFFSET);
407  }
408 
409  if (destination_address_inc_type == QM_DMA_ADDRESS_INCREMENT) {
410  destination_inc =
411  multi_transfer->block_size *
412  BIT((ctrl_low & QM_DMA_CTL_L_DST_TR_WIDTH_MASK) >>
413  QM_DMA_CTL_L_DST_TR_WIDTH_OFFSET);
414  }
415 
416  for (i = 0; i < multi_transfer->num_blocks; i++) {
417  lli->source_address = source_address;
418  lli->destination_address = destination_address;
419  lli->ctrl_low = ctrl_low;
420  lli->ctrl_high = multi_transfer->block_size;
421  if (i < (uint32_t)(multi_transfer->num_blocks - 1)) {
422  lli->linked_list_address =
423  (uint32_t)(qm_dma_linked_list_item_t *)(lli + 1);
424  lli++;
425  source_address += source_inc;
426  destination_address += destination_inc;
427  } else {
428  /* Last node. */
429  lli->linked_list_address = tail_pointing_lli;
430  }
431  }
432 
433  /* Last node of the populated linked list. */
434  return lli;
435 }
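
/*
 * Worked example (illustrative): with block_size = 64 and a 32-bit source
 * transfer width, the encoded SRC_TR_WIDTH field is 2, BIT(2) = 4 bytes per
 * data item, so consecutive LLIs advance the source address by
 * 64 * 4 = 256 bytes.
 */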
436 
437 int qm_dma_multi_transfer_set_config(
438  const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
439  qm_dma_multi_transfer_t *const multi_transfer_config)
440 {
441  QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
442  QM_CHECK(channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);
443  QM_CHECK(multi_transfer_config != NULL, -EINVAL);
444  QM_CHECK(multi_transfer_config->source_address != NULL, -EINVAL);
445  QM_CHECK(multi_transfer_config->destination_address != NULL, -EINVAL);
446  QM_CHECK(multi_transfer_config->block_size >= QM_DMA_CTL_H_BLOCK_TS_MIN,
447  -EINVAL);
448  QM_CHECK(multi_transfer_config->block_size <= QM_DMA_CTL_H_BLOCK_TS_MAX,
449  -EINVAL);
450  QM_CHECK(multi_transfer_config->num_blocks > 0, -EINVAL);
451 
452  dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id];
453  qm_dma_transfer_type_t transfer_type =
454  dma_get_transfer_type(dma, channel_id, prv_cfg);
455  volatile qm_dma_chan_reg_t *chan_reg =
456  &QM_DMA[dma]->chan_reg[channel_id];
457  /*
458  * Node to which the last node points: 0 for linear linked lists, or the
459  * first node for circular linked lists.
460  */
461  uint32_t tail_pointing_lli;
462 
463  /*
464  * Initialize block-counting internal variables, needed in the ISR to manage
465  * client callback invocations.
466  */
467  if (0 == chan_reg->llp_low) {
468  prv_cfg->num_blocks_per_buffer =
469  multi_transfer_config->num_blocks;
470  }
471  prv_cfg->num_blocks_int_pending = multi_transfer_config->num_blocks;
472 
473  switch (transfer_type) {
474  case QM_DMA_TYPE_MULTI_CONT:
475  /* Contiguous multiblock transfer. */
476  dma_set_source_address(
477  dma, channel_id,
478  (uint32_t)multi_transfer_config->source_address);
479  dma_set_destination_address(
480  dma, channel_id,
481  (uint32_t)multi_transfer_config->destination_address);
482  dma_set_block_size(dma, channel_id,
483  multi_transfer_config->block_size);
484  break;
485 
486  case QM_DMA_TYPE_MULTI_LL:
487  /*
488  * Block interrupts are not enabled for a linear linked list with a
489  * single buffer, as only one client callback invocation is
490  * needed, which takes place on the transfer complete interrupt.
491  */
492  if (0 == chan_reg->llp_low) {
493  prv_cfg->num_blocks_int_pending = 0;
494  }
495  /* FALLTHROUGH - continue to common circular/linear LL code */
496 
497  case QM_DMA_TYPE_MULTI_LL_CIRCULAR:
498  if (multi_transfer_config->linked_list_first == NULL ||
499  ((uint32_t)multi_transfer_config->linked_list_first &
500  0x3) != 0) {
501  /*
502  * User-allocated linked list memory needs to be 4-byte
503  * aligned.
504  */
505  return -EINVAL;
506  }
507 
508  if (0 == chan_reg->llp_low) {
509  /*
510  * Either the first call to this function after DMA channel
511  * configuration, or a transfer reconfiguration after a
512  * completed multiblock transfer.
513  */
514  tail_pointing_lli =
515  (transfer_type == QM_DMA_TYPE_MULTI_LL_CIRCULAR)
516  ? (uint32_t)
517  multi_transfer_config->linked_list_first
518  : 0;
519 
520  /*
521  * Initialize LLIs using CTL from the DMA register (plus
522  * INT_EN bit).
523  */
524  prv_cfg->lli_tail = dma_linked_list_init(
525  multi_transfer_config,
526  chan_reg->ctrl_low | QM_DMA_CTL_L_INT_EN_MASK,
527  tail_pointing_lli);
528 
529  /* Point DMA LLP register to this LLI. */
530  chan_reg->llp_low =
531  (uint32_t)multi_transfer_config->linked_list_first;
532  } else {
533  /*
534  * Linked list multiblock transfer (additional appended
535  * LLIs). The number of blocks needs to match the number
536  * of blocks on previous calls to this function (we only
537  * allow scatter/gather buffers of the same size).
538  */
539  if (prv_cfg->num_blocks_per_buffer !=
540  multi_transfer_config->num_blocks) {
541  return -EINVAL;
542  }
543 
544  /*
545  * Reference to NULL (linear LL) or the first LLI node
546  * (circular LL), extracted from the previously configured
547  * linked list.
548  */
549  tail_pointing_lli =
550  prv_cfg->lli_tail->linked_list_address;
551 
552  /*
553  * Point the tail of the previously configured linked
554  * list to this node.
555  */
556  prv_cfg->lli_tail->linked_list_address =
557  (uint32_t)multi_transfer_config->linked_list_first;
558 
559  /*
560  * Initialize LLI using CTL from last previously
561  * configured LLI, returning a pointer to the new tail
562  * node.
563  */
564  prv_cfg->lli_tail = dma_linked_list_init(
565  multi_transfer_config, prv_cfg->lli_tail->ctrl_low,
566  tail_pointing_lli);
567 
568  QM_ASSERT(prv_cfg->lli_tail->linked_list_address ==
569  tail_pointing_lli);
570  }
571  break;
572 
573  default:
574  /* Single block not allowed */
575  return -EINVAL;
576  break;
577  }
578 
579  return 0;
580 }
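
/*
 * Illustrative sketch, not part of the original file: configure a two-block
 * circular linked-list transfer over a statically allocated, 4-byte aligned
 * LLI array. The channel is assumed to have been configured with
 * QM_DMA_TYPE_MULTI_LL_CIRCULAR beforehand.
 */
static qm_dma_linked_list_item_t example_lli[2] __attribute__((aligned(4)));
static uint32_t example_src_buf[2 * 64];
static uint32_t example_dst_buf[2 * 64];

static int example_circular_multiblock(void)
{
	qm_dma_multi_transfer_t multi = {0};

	multi.source_address = example_src_buf;
	multi.destination_address = example_dst_buf;
	multi.block_size = 64; /* Data items per block. */
	multi.num_blocks = 2;
	multi.linked_list_first = example_lli;

	return qm_dma_multi_transfer_set_config(QM_DMA_0, QM_DMA_CHANNEL_0,
						&multi);
}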
581 
582 int qm_dma_transfer_start(const qm_dma_t dma,
583  const qm_dma_channel_id_t channel_id)
584 {
585  QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
586  QM_CHECK(channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);
587 
588  volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;
589  dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id];
590 
591  /* Clear all interrupts as they may be asserted from a previous
592  * transfer */
593  int_reg->clear_tfr_low = BIT(channel_id);
594  int_reg->clear_block_low = BIT(channel_id);
595  int_reg->clear_err_low = BIT(channel_id);
596 
597  /* Unmask Interrupts */
598  int_reg->mask_tfr_low = ((BIT(channel_id) << 8) | BIT(channel_id));
599  int_reg->mask_err_low = ((BIT(channel_id) << 8) | BIT(channel_id));
600 
601  if (prv_cfg->num_blocks_int_pending > 0) {
602  /*
603  * Block interrupts are only unmasked in multiblock mode
604  * (contiguous, circular linked list or multibuffer linear
605  * linked list).
606  */
607  int_reg->mask_block_low =
608  ((BIT(channel_id) << 8) | BIT(channel_id));
609  }
610 
611  /* Enable interrupts and the channel */
612  dma_interrupt_enable(dma, channel_id);
613  dma_channel_enable(dma, channel_id);
614 
615  return 0;
616 }
617 
618 int qm_dma_transfer_terminate(const qm_dma_t dma,
619  const qm_dma_channel_id_t channel_id)
620 {
621  QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
622  QM_CHECK(channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);
623 
624  int return_code;
625  volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;
626  volatile qm_dma_chan_reg_t *chan_reg =
627  &QM_DMA[dma]->chan_reg[channel_id];
628 
629  /* Disable interrupts for the channel */
630  dma_interrupt_disable(dma, channel_id);
631 
632  /* Mask Interrupts */
633  int_reg->mask_tfr_low = (BIT(channel_id) << 8);
634  int_reg->mask_block_low = (BIT(channel_id) << 8);
635  int_reg->mask_err_low = (BIT(channel_id) << 8);
636 
637  /* Clear llp register */
638  chan_reg->llp_low = 0;
639 
640  /* The channel is disabled and the transfer complete callback is
641  * triggered. This callback provides the client with the data length
642  * transferred before the transfer was stopped. */
643  return_code = dma_channel_disable(dma, channel_id);
644  if (!return_code) {
645  dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id];
646  if (prv_cfg->client_callback) {
647  prv_cfg->client_callback(
648  prv_cfg->callback_context,
649  get_transfer_length(dma, channel_id, prv_cfg), 0);
650  }
651  }
652 
653  return return_code;
654 }
655 
656 int qm_dma_transfer_mem_to_mem(const qm_dma_t dma,
657  const qm_dma_channel_id_t channel_id,
658  qm_dma_transfer_t *const transfer_config)
659 {
660  QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
661  QM_CHECK(channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);
662  QM_CHECK(transfer_config != NULL, -EINVAL);
663  QM_CHECK(transfer_config->source_address != NULL, -EINVAL);
664  QM_CHECK(transfer_config->destination_address != NULL, -EINVAL);
665  QM_CHECK(transfer_config->block_size <= QM_DMA_CTL_H_BLOCK_TS_MAX,
666  -EINVAL);
667 
668  int return_code;
669 
670  /* Set the transfer configuration and start the transfer */
671  return_code =
672  qm_dma_transfer_set_config(dma, channel_id, transfer_config);
673  if (!return_code) {
674  return_code = qm_dma_transfer_start(dma, channel_id);
675  }
676 
677  return return_code;
678 }
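
/*
 * Illustrative sketch, not part of the original file: one-call
 * memory-to-memory copy on an already configured channel; completion is
 * reported through the registered client callback.
 */
static int example_mem_copy(uint32_t *src, uint32_t *dst, uint32_t items)
{
	qm_dma_transfer_t xfer = {0};

	xfer.source_address = src;
	xfer.destination_address = dst;
	xfer.block_size = items;

	return qm_dma_transfer_mem_to_mem(QM_DMA_0, QM_DMA_CHANNEL_0, &xfer);
}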
679 
680 #if (ENABLE_RESTORE_CONTEXT)
681 int qm_dma_save_context(const qm_dma_t dma, qm_dma_context_t *const ctx)
682 {
683  QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
684  QM_CHECK(ctx != NULL, -EINVAL);
685  int i;
686 
687  QM_RW qm_dma_misc_reg_t *misc_reg = &QM_DMA[dma]->misc_reg;
688 
689  ctx->misc_cfg_low = misc_reg->cfg_low;
690 
691  for (i = 0; i < QM_DMA_CHANNEL_NUM; i++) {
692  QM_RW qm_dma_chan_reg_t *chan_reg = &QM_DMA[dma]->chan_reg[i];
693 
694  /* Masking the bit QM_DMA_CTL_L_INT_EN_MASK disables a possible
695  * trigger of a new transfer. */
696  ctx->channel[i].ctrl_low =
697  chan_reg->ctrl_low & ~QM_DMA_CTL_L_INT_EN_MASK;
698  ctx->channel[i].cfg_low = chan_reg->cfg_low;
699  ctx->channel[i].cfg_high = chan_reg->cfg_high;
700  ctx->channel[i].llp_low = chan_reg->llp_low;
701  }
702  return 0;
703 }
704 
705 int qm_dma_restore_context(const qm_dma_t dma,
706  const qm_dma_context_t *const ctx)
707 {
708  QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
709  QM_CHECK(ctx != NULL, -EINVAL);
710  int i;
711  QM_RW qm_dma_misc_reg_t *misc_reg = &QM_DMA[dma]->misc_reg;
712 
713  misc_reg->cfg_low = ctx->misc_cfg_low;
714 
715  for (i = 0; i < QM_DMA_CHANNEL_NUM; i++) {
716  QM_RW qm_dma_chan_reg_t *chan_reg = &QM_DMA[dma]->chan_reg[i];
717 
718  chan_reg->ctrl_low = ctx->channel[i].ctrl_low;
719  chan_reg->cfg_low = ctx->channel[i].cfg_low;
720  chan_reg->cfg_high = ctx->channel[i].cfg_high;
721  chan_reg->llp_low = ctx->channel[i].llp_low;
722  }
723  return 0;
724 }
725 #else
726 int qm_dma_save_context(const qm_dma_t dma, qm_dma_context_t *const ctx)
727 {
728  (void)dma;
729  (void)ctx;
730 
731  return 0;
732 }
733 
734 int qm_dma_restore_context(const qm_dma_t dma,
735  const qm_dma_context_t *const ctx)
736 {
737  (void)dma;
738  (void)ctx;
739 
740  return 0;
741 }
742 #endif /* ENABLE_RESTORE_CONTEXT */
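
/*
 * Illustrative sketch, not part of the original file: preserve the DMA
 * configuration across a low-power state. The power-state entry call itself
 * is left out, as it depends on the application.
 */
static qm_dma_context_t example_dma_ctx;

static void example_sleep_with_dma(void)
{
	qm_dma_save_context(QM_DMA_0, &example_dma_ctx);
	/* Enter the low-power state here (application specific). */
	qm_dma_restore_context(QM_DMA_0, &example_dma_ctx);
}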