qm_dma_reg_t *qm_dma[QM_DMA_NUM] = {(qm_dma_reg_t *)QM_DMA_BASE};
static void qm_dma_isr_handler(const qm_dma_t dma,
			       const qm_dma_channel_id_t channel_id)
{
	dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id];
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];
	uint32_t transfer_length =
	    get_transfer_length(dma, channel_id, prv_cfg);
	if (prv_cfg->client_callback) {
		prv_cfg->client_callback(prv_cfg->callback_context,
					 transfer_length, 0);
	}

	prv_cfg->num_blocks_int_pending--;

	if (NULL != prv_cfg->lli_tail &&
	    0 == prv_cfg->num_blocks_int_pending) {
		if (prv_cfg->client_callback) {
			prv_cfg->client_callback(
			    prv_cfg->callback_context, transfer_length, 0);
		}
		prv_cfg->num_blocks_int_pending =
		    prv_cfg->num_blocks_per_buffer;
	} else if (NULL == prv_cfg->lli_tail) {
		QM_ASSERT(prv_cfg->num_blocks_int_pending <
			  prv_cfg->num_blocks_per_buffer);
		if (1 == prv_cfg->num_blocks_int_pending) {
			chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_SRC_MASK;
			chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_DST_MASK;
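/*
 * The handler above reports progress through the channel's registered client
 * callback (see the client_callback member documented further down).  A
 * minimal sketch of a matching callback, assuming the application only wants
 * to record the reported length; dma_done_callback and last_transfer_len are
 * illustrative names, not part of the driver:
 */
static volatile uint32_t last_transfer_len;

static void dma_done_callback(void *callback_context, uint32_t len,
			      int error_code)
{
	(void)callback_context;

	if (0 == error_code) {
		/* len is the length computed by get_transfer_length(). */
		last_transfer_len = len;
	}
}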
static void qm_dma_isr_err_handler(const qm_dma_t dma)
{
	uint32_t interrupt_channel_mask;
	dma_cfg_prv_t *chan_cfg;

	while (interrupt_channel_mask) {
		if (!(interrupt_channel_mask & 0x1)) {
			interrupt_channel_mask >>= 1;

		chan_cfg = &dma_channel_config[dma][channel_id];
		if (chan_cfg->client_callback) {
			chan_cfg->client_callback(chan_cfg->callback_context, 0,

		interrupt_channel_mask >>= 1;
	QM_ISR_EOI(QM_IRQ_DMA_0_ERROR_INT_VECTOR);

	QM_ISR_EOI(QM_IRQ_DMA_0_INT_0_VECTOR);

	QM_ISR_EOI(QM_IRQ_DMA_0_INT_1_VECTOR);

	QM_ISR_EOI(QM_IRQ_DMA_0_INT_2_VECTOR);

	QM_ISR_EOI(QM_IRQ_DMA_0_INT_3_VECTOR);

	QM_ISR_EOI(QM_IRQ_DMA_0_INT_4_VECTOR);

	QM_ISR_EOI(QM_IRQ_DMA_0_INT_5_VECTOR);

	QM_ISR_EOI(QM_IRQ_DMA_0_INT_6_VECTOR);

	QM_ISR_EOI(QM_IRQ_DMA_0_INT_7_VECTOR);
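/*
 * Each channel interrupt vector follows the same shape: dispatch to the
 * common handler for its fixed channel, then signal end of interrupt.  A
 * sketch for channel 0 of controller 0; the ISR name qm_dma_0_isr_0 is an
 * assumption by analogy with qm_dma_0_error_isr documented below, and
 * QM_DMA_0 / QM_DMA_CHANNEL_0 name the controller and channel enumerators:
 */
QM_ISR_DECLARE(qm_dma_0_isr_0)
{
	qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_0);
	QM_ISR_EOI(QM_IRQ_DMA_0_INT_0_VECTOR);
}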
	return_code = dma_controller_disable(dma);

	return_code = dma_channel_disable(dma, channel_id);
	dma_interrupt_disable(dma, channel_id);

	dma_controller_enable(dma);
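/*
 * Typical bring-up: enable the DMA clock (clk_dma_enable() appears in the API
 * summary below), then initialise the controller.  A minimal sketch, assuming
 * controller QM_DMA_0 and the usual qm_dma.h / clk.h headers:
 */
#include "clk.h"
#include "qm_dma.h"

int dma_bring_up(void)
{
	int rc = clk_dma_enable();

	if (rc) {
		return rc;
	}

	/* Disables every channel and its interrupts, then enables the
	 * controller, as the fragments above show. */
	return qm_dma_init(QM_DMA_0);
}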
	QM_CHECK(channel_config != NULL, -EINVAL);

	dma_cfg_prv_t *chan_cfg = &dma_channel_config[dma][channel_id];

	return_code = dma_set_transfer_type(dma, channel_id,

	dma_set_source_transfer_width(dma, channel_id,
				      channel_config->source_transfer_width);
	dma_set_destination_transfer_width(
	    dma, channel_id, channel_config->destination_transfer_width);

	dma_set_source_burst_length(dma, channel_id,
				    channel_config->source_burst_length);
	dma_set_destination_burst_length(
	    dma, channel_id, channel_config->destination_burst_length);

	dma_set_transfer_direction(dma, channel_id,
				   channel_config->channel_direction);

	dma_set_source_increment(dma, channel_id, QM_DMA_ADDRESS_NO_CHANGE);
	dma_set_destination_increment(dma, channel_id,
				      QM_DMA_ADDRESS_INCREMENT);

	dma_set_source_increment(dma, channel_id, QM_DMA_ADDRESS_INCREMENT);
	dma_set_destination_increment(dma, channel_id,
				      QM_DMA_ADDRESS_NO_CHANGE);

	dma_set_source_increment(dma, channel_id, QM_DMA_ADDRESS_INCREMENT);
	dma_set_destination_increment(dma, channel_id,
				      QM_DMA_ADDRESS_INCREMENT);

	dma_set_handshake_interface(
	    dma, channel_id, channel_config->handshake_interface);

	dma_set_handshake_type(dma, channel_id, 0);

	dma_set_handshake_polarity(dma, channel_id,
				   channel_config->handshake_polarity);

	chan_cfg->lli_tail = NULL;
	chan_cfg->num_blocks_per_buffer = 1;
	chan_cfg->transfer_type_ll_circular =
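/*
 * A channel is configured once with qm_dma_channel_set_config() before any
 * transfer is programmed.  A sketch for a memory-to-memory channel, using the
 * qm_dma_channel_config_t fields documented in the API summary below.  The
 * transfer-width, burst-length and direction enumerator names are assumptions
 * taken from the QMSI public headers, and dma_done_callback is the
 * illustrative callback sketched earlier:
 */
int dma_channel_setup(void)
{
	qm_dma_channel_config_t cfg = {0};

	cfg.channel_direction = QM_DMA_MEMORY_TO_MEMORY;
	cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_32;      /* Assumed name. */
	cfg.destination_transfer_width = QM_DMA_TRANS_WIDTH_32; /* Assumed name. */
	cfg.source_burst_length = QM_DMA_BURST_TRANS_LENGTH_1;  /* Assumed name. */
	cfg.destination_burst_length = QM_DMA_BURST_TRANS_LENGTH_1;
	cfg.client_callback = dma_done_callback;
	cfg.callback_context = NULL;

	return qm_dma_channel_set_config(QM_DMA_0, QM_DMA_CHANNEL_0, &cfg);
}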
	QM_CHECK(transfer_config != NULL, -EINVAL);
	QM_CHECK(transfer_config->block_size >= QM_DMA_CTL_H_BLOCK_TS_MIN,
		 -EINVAL);
	QM_CHECK(transfer_config->block_size <= QM_DMA_CTL_H_BLOCK_TS_MAX,
		 -EINVAL);

	dma_set_source_address(dma, channel_id,
			       (uint32_t)transfer_config->source_address);
	dma_set_destination_address(
	    dma, channel_id, (uint32_t)transfer_config->destination_address);

	dma_set_block_size(dma, channel_id, transfer_config->block_size);
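/*
 * With the channel configured, a single-block transfer needs only addresses
 * and a block size.  A sketch; src_buf/dst_buf are placeholder buffers and
 * block_size must stay in the documented 1..4095 range:
 */
static uint32_t src_buf[64];
static uint32_t dst_buf[64];

int dma_single_copy(void)
{
	qm_dma_transfer_t xfer;
	int rc;

	xfer.source_address = src_buf;
	xfer.destination_address = dst_buf;
	xfer.block_size = 64;

	rc = qm_dma_transfer_set_config(QM_DMA_0, QM_DMA_CHANNEL_0, &xfer);
	if (rc) {
		return rc;
	}

	/* Unmasks the channel interrupts and enables the channel. */
	return qm_dma_transfer_start(QM_DMA_0, QM_DMA_CHANNEL_0);
}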
static qm_dma_linked_list_item_t *
dma_linked_list_init(const qm_dma_multi_transfer_t *multi_transfer,
		     uint32_t ctrl_low, uint32_t tail_pointing_lli)
{
	uint32_t source_address = (uint32_t)multi_transfer->source_address;
	uint32_t destination_address =
	    (uint32_t)multi_transfer->destination_address;

	qm_dma_address_increment_t source_address_inc_type =
	    (ctrl_low & QM_DMA_CTL_L_SINC_MASK) >> QM_DMA_CTL_L_SINC_OFFSET;
	qm_dma_address_increment_t destination_address_inc_type =
	    (ctrl_low & QM_DMA_CTL_L_DINC_MASK) >> QM_DMA_CTL_L_DINC_OFFSET;

	uint32_t source_inc = 0;
	uint32_t destination_inc = 0;

	QM_ASSERT(source_address_inc_type == QM_DMA_ADDRESS_INCREMENT ||
		  source_address_inc_type == QM_DMA_ADDRESS_NO_CHANGE);
	QM_ASSERT(destination_address_inc_type == QM_DMA_ADDRESS_INCREMENT ||
		  destination_address_inc_type == QM_DMA_ADDRESS_NO_CHANGE);

	if (source_address_inc_type == QM_DMA_ADDRESS_INCREMENT) {
		source_inc =
		    BIT((ctrl_low & QM_DMA_CTL_L_SRC_TR_WIDTH_MASK) >>
			QM_DMA_CTL_L_SRC_TR_WIDTH_OFFSET);
	}

	if (destination_address_inc_type == QM_DMA_ADDRESS_INCREMENT) {
		destination_inc =
		    BIT((ctrl_low & QM_DMA_CTL_L_DST_TR_WIDTH_MASK) >>
			QM_DMA_CTL_L_DST_TR_WIDTH_OFFSET);
	}

	for (i = 0; i < multi_transfer->num_blocks; i++) {
		lli->source_address = source_address;
		lli->destination_address = destination_address;
		lli->ctrl_low = ctrl_low;

		if (i < (uint32_t)(multi_transfer->num_blocks - 1)) {
			lli->linked_list_address =
			    (uint32_t)(qm_dma_linked_list_item_t *)(lli + 1);

			source_address += source_inc;
			destination_address += destination_inc;
		} else {
			lli->linked_list_address = tail_pointing_lli;
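/*
 * dma_linked_list_init() walks a caller-provided, contiguous array of
 * descriptors (note the lli + 1 arithmetic above), one per block, and points
 * the last descriptor at tail_pointing_lli.  As an illustration, with three
 * blocks and tail_pointing_lli set back to the first descriptor (the circular
 * case) the resulting chain is:
 *
 *   lli[0] -> lli[1] -> lli[2] -> lli[0]
 *
 * The descriptor storage itself comes from the application, e.g.:
 */
static qm_dma_linked_list_item_t lli_chain[3]; /* One descriptor per block. */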
	QM_CHECK(multi_transfer_config != NULL, -EINVAL);
	QM_CHECK(multi_transfer_config->block_size >= QM_DMA_CTL_H_BLOCK_TS_MIN,
		 -EINVAL);
	QM_CHECK(multi_transfer_config->block_size <= QM_DMA_CTL_H_BLOCK_TS_MAX,
		 -EINVAL);
	QM_CHECK(multi_transfer_config->num_blocks > 0, -EINVAL);

	dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id];
	qm_dma_transfer_type_t transfer_type =
	    dma_get_transfer_type(dma, channel_id, prv_cfg);
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];
	uint32_t tail_pointing_lli;

	prv_cfg->num_blocks_per_buffer = multi_transfer_config->num_blocks;
	prv_cfg->num_blocks_int_pending = multi_transfer_config->num_blocks;

	switch (transfer_type) {

		dma_set_source_address(
		    dma, channel_id,
		    (uint32_t)multi_transfer_config->source_address);
		dma_set_destination_address(
		    dma, channel_id,
		    (uint32_t)multi_transfer_config->destination_address);
		dma_set_block_size(dma, channel_id,
				   multi_transfer_config->block_size);

		prv_cfg->num_blocks_int_pending = 0;

		prv_cfg->lli_tail = dma_linked_list_init(
		    multi_transfer_config,
		    chan_reg->ctrl_low | QM_DMA_CTL_L_INT_EN_MASK,

		if (prv_cfg->num_blocks_per_buffer !=

			prv_cfg->lli_tail->linked_list_address;

			prv_cfg->lli_tail->linked_list_address =

		prv_cfg->lli_tail = dma_linked_list_init(
		    multi_transfer_config, prv_cfg->lli_tail->ctrl_low,

		QM_ASSERT(prv_cfg->lli_tail->linked_list_address ==
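/*
 * A sketch of programming a linked-list multiblock transfer through
 * qm_dma_multi_transfer_t, using the fields documented in the API summary
 * below.  Buffer and descriptor names are placeholders, and the channel is
 * assumed to have been configured beforehand with a linked-list transfer
 * type:
 */
#define DMA_NUM_BLOCKS (4)
#define DMA_BLOCK_SIZE (32)

static uint32_t multi_src[DMA_NUM_BLOCKS * DMA_BLOCK_SIZE];
static uint32_t multi_dst[DMA_NUM_BLOCKS * DMA_BLOCK_SIZE];
static qm_dma_linked_list_item_t lli_buf[DMA_NUM_BLOCKS];

int dma_multi_start(void)
{
	qm_dma_multi_transfer_t multi = {0};
	int rc;

	multi.source_address = multi_src;
	multi.destination_address = multi_dst;
	multi.block_size = DMA_BLOCK_SIZE; /* 1..4095 */
	multi.num_blocks = DMA_NUM_BLOCKS;
	multi.linked_list_first = lli_buf; /* NULL selects contiguous mode. */

	rc = qm_dma_multi_transfer_set_config(QM_DMA_0, QM_DMA_CHANNEL_0,
					      &multi);
	if (rc) {
		return rc;
	}

	return qm_dma_transfer_start(QM_DMA_0, QM_DMA_CHANNEL_0);
}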
	dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id];

	int_reg->mask_tfr_low = ((BIT(channel_id) << 8) | BIT(channel_id));
	int_reg->mask_err_low = ((BIT(channel_id) << 8) | BIT(channel_id));

	if (prv_cfg->num_blocks_int_pending > 0) {
		int_reg->mask_block_low =
		    ((BIT(channel_id) << 8) | BIT(channel_id));
	}

	dma_interrupt_enable(dma, channel_id);
	dma_channel_enable(dma, channel_id);
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	dma_interrupt_disable(dma, channel_id);

	return_code = dma_channel_disable(dma, channel_id);

	dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id];
	if (prv_cfg->client_callback) {
		prv_cfg->client_callback(
		    prv_cfg->callback_context,
		    get_transfer_length(dma, channel_id, prv_cfg), 0);
	}
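/*
 * Terminating an in-flight transfer masks the channel interrupts, disables
 * the channel and reports the length transferred so far through the client
 * callback, as the fragment above shows.  Usage is a single call:
 */
int dma_stop(void)
{
	return qm_dma_transfer_terminate(QM_DMA_0, QM_DMA_CHANNEL_0);
}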
	QM_CHECK(transfer_config != NULL, -EINVAL);
	QM_CHECK(transfer_config->block_size <= QM_DMA_CTL_H_BLOCK_TS_MAX,
		 -EINVAL);
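/*
 * qm_dma_transfer_mem_to_mem() combines configuration and start in one call.
 * A sketch with placeholder buffers on an already configured channel:
 */
static uint32_t copy_src[64];
static uint32_t copy_dst[64];

int dma_copy_now(void)
{
	qm_dma_transfer_t copy;

	copy.source_address = copy_src;
	copy.destination_address = copy_dst;
	copy.block_size = 64;

	/* Programs the channel and starts the transfer immediately. */
	return qm_dma_transfer_mem_to_mem(QM_DMA_0, QM_DMA_CHANNEL_0, &copy);
}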
#if (ENABLE_RESTORE_CONTEXT)

	QM_CHECK(ctx != NULL, -EINVAL);

		ctx->channel[i].ctrl_low =
		    chan_reg->ctrl_low & ~QM_DMA_CTL_L_INT_EN_MASK;
		ctx->channel[i].cfg_low = chan_reg->cfg_low;
		ctx->channel[i].cfg_high = chan_reg->cfg_high;
		ctx->channel[i].llp_low = chan_reg->llp_low;

	QM_CHECK(ctx != NULL, -EINVAL);

		chan_reg->cfg_low = ctx->channel[i].cfg_low;
		chan_reg->cfg_high = ctx->channel[i].cfg_high;
		chan_reg->llp_low = ctx->channel[i].llp_low;
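/*
 * The save/restore pair brackets a power state in which the controller loses
 * its register contents.  A sketch, assuming a statically allocated context
 * and that sleep entry and exit happen elsewhere:
 */
static qm_dma_context_t dma_ctx;

void dma_before_sleep(void)
{
	/* Snapshot per-channel control, configuration and LLP registers. */
	qm_dma_save_context(QM_DMA_0, &dma_ctx);
}

void dma_after_wake(void)
{
	/* Re-program the controller from the saved snapshot. */
	qm_dma_restore_context(QM_DMA_0, &dma_ctx);
}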
QM_RW uint32_t cfg_low
CFG.
DMA channel id for channel 5.
uint16_t num_blocks
Number of contiguous blocks to be transferred.
uint32_t cfg_high
Channel Configuration Upper.
DMA interrupt register map.
QM_RW uint32_t ctrl_low
CTL.
DMA channel configuration structure.
qm_dma_transfer_type_t transfer_type
DMA transfer type.
uint32_t * destination_address
First block destination address.
Link list multiblock mode.
uint32_t llp_low
Channel Linked List Pointer.
uint32_t * destination_address
DMA destination transfer address.
DMA channel id for channel 2.
DMA single block transfer configuration structure.
qm_dma_burst_length_t source_burst_length
DMA source burst length.
qm_dma_linked_list_item_t * linked_list_first
First block LLI descriptor or NULL (contiguous mode).
QM_RW uint32_t mask_src_trans_low
MaskSrcTran.
int clk_dma_enable(void)
Enable the DMA clock.
DMA channel id for channel 1.
uint32_t block_size
DMA block size, Min = 1, Max = 4095.
void * callback_context
DMA client context passed to the callbacks.
Link list multiblock mode with cyclic operation.
QM_RW uint32_t mask_err_low
MaskErr.
QM_RW uint32_t clear_tfr_low
ClearTfr.
QM_RW uint32_t mask_tfr_low
MaskTfr.
uint32_t cfg_low
Channel Configuration Lower.
int qm_dma_restore_context(const qm_dma_t dma, const qm_dma_context_t *const ctx)
Restore DMA peripheral's context.
uint32_t ctrl_low
Channel Control Lower.
QM_RW uint32_t clear_block_low
ClearBlock.
DMA miscellaneous register map.
qm_dma_handshake_interface_t handshake_interface
DMA channel handshake interface ID.
QM_ISR_DECLARE(qm_dma_0_error_isr)
ISR for DMA error interrupt.
QM_RW uint32_t clear_dst_trans_low
ClearDstTran.
int qm_dma_transfer_set_config(const qm_dma_t dma, const qm_dma_channel_id_t channel_id, qm_dma_transfer_t *const transfer_config)
Setup a DMA single block transfer.
Memory to memory transfer.
qm_dma_handshake_polarity_t handshake_polarity
DMA channel handshake polarity.
int qm_dma_multi_transfer_set_config(const qm_dma_t dma, const qm_dma_channel_id_t channel_id, qm_dma_multi_transfer_t *const multi_transfer_config)
Setup a DMA multiblock transfer.
uint16_t block_size
DMA block size, Min = 1, Max = 4095.
QM_RW uint32_t llp_low
LLP.
QM_RW uint32_t cfg_low
DmaCfgReg.
DMA channel id for channel 6.
QM_RW uint32_t mask_block_low
MaskBlock.
Peripheral to memory transfer.
qm_dma_burst_length_t destination_burst_length
DMA destination burst length.
qm_dma_transfer_width_t destination_transfer_width
DMA destination transfer width.
int qm_dma_channel_set_config(const qm_dma_t dma, const qm_dma_channel_id_t channel_id, qm_dma_channel_config_t *const channel_config)
Setup a DMA channel configuration.
QM_RW uint32_t status_err_low
StatusErr.
Number of DMA controllers.
DMA channel id for channel 0.
qm_dma_transfer_width_t source_transfer_width
DMA source transfer width.
DMA channel id for channel 3.
int qm_dma_transfer_terminate(const qm_dma_t dma, const qm_dma_channel_id_t channel_id)
Terminate a DMA transfer.
int qm_dma_transfer_start(const qm_dma_t dma, const qm_dma_channel_id_t channel_id)
Start a DMA transfer.
DMA channel id for channel 7.
QM_RW uint32_t status_tfr_low
StatusTfr.
int qm_dma_init(const qm_dma_t dma)
Initialise the DMA controller.
DMA multiblock transfer configuration structure.
DMA channel register map.
Memory to peripheral transfer.
DMA channel id for channel 4.
uint32_t * source_address
DMA source transfer address.
QM_RW uint32_t status_block_low
StatusBlock.
uint32_t misc_cfg_low
DMA Configuration.
Contiguous multiblock mode.
int qm_dma_transfer_mem_to_mem(const qm_dma_t dma, const qm_dma_channel_id_t channel_id, qm_dma_transfer_t *const transfer_config)
Setup and start memory to memory transfer.
qm_dma_channel_id_t
DMA channel IDs.
uint32_t * source_address
First block source address.
QM_RW uint32_t status_int_low
StatusInt.
QM_RW uint32_t clear_src_trans_low
ClearSrcTran.
QM_RW uint32_t mask_dst_trans_low
MaskDstTran.
qm_dma_channel_direction_t channel_direction
DMA channel direction.
int qm_dma_save_context(const qm_dma_t dma, qm_dma_context_t *const ctx)
Save DMA peripheral's context.
QM_RW uint32_t clear_err_low
ClearErr.
void(* client_callback)(void *callback_context, uint32_t len, int error_code)
Client callback for DMA transfer ISR.