Intel® Quark™ Microcontroller Software Interface 1.4.0
Intel® Quark™ Microcontroller BSP
dma.h
/*
 * {% copyright %}
 */

#ifndef __DMA_H_
#define __DMA_H_

#include <errno.h>
#include "clk.h"
#include "qm_dma.h"

/* Timeout definitions */
#define STANDARD_TIMEOUT_MICROSECOND (1000)
#define ONE_MICROSECOND (1)

/* Set specific register bits */
#define UPDATE_REG_BITS(reg, value, offset, mask) \
        do { \
                reg &= ~(mask); \
                reg |= ((value) << (offset)); \
        } while (0)
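
/*
 * Illustrative expansion (not part of the driver): updating a hypothetical
 * 2-bit field at offset 4 whose mask is (0x3 << 4).
 *
 *   uint32_t reg = 0xff;
 *   UPDATE_REG_BITS(reg, 0x2, 4, (0x3 << 4));
 *   // reg is now 0xef: the field bits are cleared first, then 0x2 is
 *   // written at offset 4.
 */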

/* Mask for all supported channels */
#define CHANNEL_MASK_ALL (BIT(QM_DMA_CHANNEL_NUM) - 1)

/*
 * DMA address increment type.
 */
typedef enum {
        QM_DMA_ADDRESS_INCREMENT = 0x0, /**< Increment address. */
        QM_DMA_ADDRESS_DECREMENT = 0x1, /**< Decrement address. */
        QM_DMA_ADDRESS_NO_CHANGE = 0x2  /**< Don't modify address. */
} qm_dma_address_increment_t;

/*
 * DMA channel private structure.
 */
typedef struct dma_cfg_prv_t {
        /* DMA client context to be passed back with callbacks. */
        void *callback_context;

        /* DMA channel transfer callback. */
        void (*client_callback)(void *callback_context, uint32_t len,
                                int error_code);

        /* Pointer to latest configured LLI (multiblock linked list). */
        qm_dma_linked_list_item_t *lli_tail;

        /*
         * Number of contiguous blocks per buffer (multiblock mode). This is
         * needed to calculate the total transfer length, which is
         * communicated to the client with the complete callback. In linked
         * list mode, where more than one buffer (scatter/gather) is used,
         * this is also used to count single-block transfer callbacks so
         * that we know when to invoke the client callback corresponding to
         * a whole transferred buffer.
         */
        uint16_t num_blocks_per_buffer;

        /*
         * Number of block interrupts pending on the buffer currently being
         * transferred. Used in multiblock contiguous mode as well as
         * multiblock linked list mode when more than one buffer is set up.
         * This counter is decremented on each block interrupt.
         */
        uint16_t num_blocks_int_pending;

        /*
         * In multiblock linked list mode, indicates whether the transfer is
         * linear or circular. This information cannot be extracted from the
         * DMA registers.
         */
        bool transfer_type_ll_circular;
} dma_cfg_prv_t;
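
/*
 * Illustrative bookkeeping, based on the field descriptions above (not a
 * statement about the ISR implementation): for a scatter/gather transfer
 * using two buffers of four blocks each, num_blocks_per_buffer is 4;
 * num_blocks_int_pending counts down from 4 on each block interrupt, and
 * when it reaches 0 one whole buffer has been transferred, at which point
 * client_callback can be invoked with that buffer's total length.
 */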

/*
 * Return the length of the transfer, in bytes, at the time this function is
 * called.
 */
static __inline__ uint32_t
get_transfer_length(const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
                    const dma_cfg_prv_t *prv_cfg)
{
        uint32_t source_transfer_width;
        uint32_t transfer_length;
        uint32_t ctrl_low;
        uint32_t ctrl_high;

        QM_ASSERT(prv_cfg != NULL);
        if (NULL == prv_cfg->lli_tail) {
                /* Single block or contiguous multiblock. */
                volatile qm_dma_chan_reg_t *chan_reg =
                    &QM_DMA[dma]->chan_reg[channel_id];
                ctrl_low = chan_reg->ctrl_low;
                ctrl_high = chan_reg->ctrl_high;
        } else {
                /* Linked list multiblock. */
                ctrl_low = prv_cfg->lli_tail->ctrl_low;
                ctrl_high = prv_cfg->lli_tail->ctrl_high;
        }

        /* Read the source transfer width register value. */
        source_transfer_width = ((ctrl_low & QM_DMA_CTL_L_SRC_TR_WIDTH_MASK) >>
                                 QM_DMA_CTL_L_SRC_TR_WIDTH_OFFSET);

        /*
         * Read the length from the block_ts field. The units of this field
         * depend on the source transfer width.
         */
        transfer_length = ((ctrl_high & QM_DMA_CTL_H_BLOCK_TS_MASK) >>
                           QM_DMA_CTL_H_BLOCK_TS_OFFSET) *
                          prv_cfg->num_blocks_per_buffer;

        /*
         * To convert this to bytes, the transfer length can be shifted left
         * by the source transfer width value. The encoded value corresponds
         * to the number of shifts required, so this can be done as an
         * optimization.
         */
        return (transfer_length << source_transfer_width);
}
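
/*
 * Worked example (illustrative): if block_ts holds 24 items, the channel
 * uses 4 blocks per buffer, and the source transfer width encoding is 0x2
 * (assuming the usual encoding where 0x0/0x1/0x2 select 8/16/32-bit
 * transfers), then:
 *
 *   transfer_length = 24 * 4 = 96 items
 *   bytes returned  = 96 << 2 = 384
 */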

static __inline__ int dma_controller_disable(const qm_dma_t dma)
{
        volatile qm_dma_misc_reg_t *misc_reg = &QM_DMA[dma]->misc_reg;

        misc_reg->cfg_low = 0;
        if (misc_reg->cfg_low) {
                return -EIO;
        }

        return 0;
}

static __inline__ void dma_controller_enable(const qm_dma_t dma)
{
        QM_DMA[dma]->misc_reg.cfg_low = QM_DMA_MISC_CFG_DMA_EN;
}

static __inline__ int dma_channel_disable(const qm_dma_t dma,
                                          const qm_dma_channel_id_t channel_id)
{
        uint8_t channel_mask = BIT(channel_id);
        uint16_t timeout_us;
        volatile qm_dma_misc_reg_t *misc_reg = &QM_DMA[dma]->misc_reg;
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        /* If the channel is already disabled, return. */
        if (!(misc_reg->chan_en_low & channel_mask)) {
                return 0;
        }

        /* Suspend the channel. */
        chan_reg->cfg_low |= QM_DMA_CFG_L_CH_SUSP_MASK;

        /* Ensure that the channel has been suspended. */
        timeout_us = STANDARD_TIMEOUT_MICROSECOND;
        while ((!(chan_reg->cfg_low & QM_DMA_CFG_L_CH_SUSP_MASK)) &&
               timeout_us) {
                clk_sys_udelay(ONE_MICROSECOND);
                timeout_us--;
        }

        if (!(chan_reg->cfg_low & QM_DMA_CFG_L_CH_SUSP_MASK)) {
                return -EIO;
        }

        /* Wait until the FIFO is empty. */
        timeout_us = STANDARD_TIMEOUT_MICROSECOND;
        while ((!(chan_reg->cfg_low & QM_DMA_CFG_L_FIFO_EMPTY_MASK)) &&
               timeout_us) {
                clk_sys_udelay(ONE_MICROSECOND);
                timeout_us--;
        }

        /*
         * Disable the channel and wait to confirm that it has been
         * disabled. Writing only the write-enable bits, with the enable
         * bits clear, clears this channel's enable bit.
         */
        misc_reg->chan_en_low = (channel_mask << QM_DMA_MISC_CHAN_EN_WE_OFFSET);

        timeout_us = STANDARD_TIMEOUT_MICROSECOND;
        while ((misc_reg->chan_en_low & channel_mask) && timeout_us) {
                clk_sys_udelay(ONE_MICROSECOND);
                timeout_us--;
        }

        if (misc_reg->chan_en_low & channel_mask) {
                return -EIO;
        }

        /* Clear the suspend bit so the channel can resume when re-enabled. */
        chan_reg->cfg_low &= ~QM_DMA_CFG_L_CH_SUSP_MASK;

        return 0;
}
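
/*
 * Usage sketch (illustrative; QM_DMA_0 and QM_DMA_CHANNEL_0 are assumed
 * from qm_soc_regs.h): stop a channel before reconfiguring it.
 *
 *   if (0 != dma_channel_disable(QM_DMA_0, QM_DMA_CHANNEL_0)) {
 *           // Channel failed to suspend or disable within the timeout.
 *   }
 */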

static __inline__ void dma_channel_enable(const qm_dma_t dma,
                                          const qm_dma_channel_id_t channel_id)
{
        uint8_t channel_mask = BIT(channel_id);

        /* Write the channel enable bit together with its write-enable bit. */
        QM_DMA[dma]->misc_reg.chan_en_low =
            (channel_mask << QM_DMA_MISC_CHAN_EN_WE_OFFSET) | channel_mask;
}

static __inline__ void
dma_interrupt_disable(const qm_dma_t dma, const qm_dma_channel_id_t channel_id)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        chan_reg->ctrl_low &= ~QM_DMA_CTL_L_INT_EN_MASK;
}

static __inline__ void
dma_interrupt_enable(const qm_dma_t dma, const qm_dma_channel_id_t channel_id)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        chan_reg->ctrl_low |= QM_DMA_CTL_L_INT_EN_MASK;
}

static __inline__ int
dma_set_transfer_type(const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
                      const qm_dma_transfer_type_t transfer_type,
                      const qm_dma_channel_direction_t channel_direction)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        /*
         * Valid for single block and contiguous multiblock; will be updated
         * later if using linked list multiblock.
         */
        chan_reg->llp_low = 0x0;

        /* Configure the registers according to the transfer type. */
        switch (transfer_type) {
        case QM_DMA_TYPE_SINGLE:
                chan_reg->ctrl_low &= ~QM_DMA_CTL_L_LLP_SRC_EN_MASK;
                chan_reg->ctrl_low &= ~QM_DMA_CTL_L_LLP_DST_EN_MASK;
                chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_SRC_MASK;
                chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_DST_MASK;
                break;

        case QM_DMA_TYPE_MULTI_CONT:
                if (QM_DMA_MEMORY_TO_MEMORY == channel_direction) {
                        /*
                         * The DMA core cannot handle memory to memory
                         * multiblock contiguous transactions.
                         */
                        return -EINVAL;
                } else if (QM_DMA_PERIPHERAL_TO_MEMORY == channel_direction) {
                        /* Reload source. */
                        chan_reg->cfg_low |= QM_DMA_CFG_L_RELOAD_SRC_MASK;
                        chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_DST_MASK;
                } else {
                        /* Reload destination. */
                        chan_reg->cfg_low |= QM_DMA_CFG_L_RELOAD_DST_MASK;
                        chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_SRC_MASK;
                }

                /* Disable block chaining. */
                chan_reg->ctrl_low &= ~QM_DMA_CTL_L_LLP_SRC_EN_MASK;
                chan_reg->ctrl_low &= ~QM_DMA_CTL_L_LLP_DST_EN_MASK;
                break;

        case QM_DMA_TYPE_MULTI_LL:
        case QM_DMA_TYPE_MULTI_LL_CIRCULAR:
                /* Destination status update disable. */
                chan_reg->cfg_high &= ~QM_DMA_CFG_H_DS_UPD_EN_MASK;

                /* Source status update disable. */
                chan_reg->cfg_high &= ~QM_DMA_CFG_H_SS_UPD_EN_MASK;

                /* Enable linked lists for source. */
                chan_reg->ctrl_low |= QM_DMA_CTL_L_LLP_SRC_EN_MASK;
                chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_SRC_MASK;

                /* Enable linked lists for destination. */
                chan_reg->ctrl_low |= QM_DMA_CTL_L_LLP_DST_EN_MASK;
                chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_DST_MASK;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
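
/*
 * Summary of the register configuration applied above:
 *
 *   QM_DMA_TYPE_SINGLE:              LLP disabled, no reload.
 *   QM_DMA_TYPE_MULTI_CONT:          LLP disabled; reload source
 *                                    (peripheral-to-memory) or destination
 *                                    (otherwise); invalid for
 *                                    memory-to-memory.
 *   QM_DMA_TYPE_MULTI_LL[_CIRCULAR]: LLP enabled for source and
 *                                    destination, no reload, status
 *                                    updates disabled.
 */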

static __inline__ qm_dma_transfer_type_t
dma_get_transfer_type(const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
                      const dma_cfg_prv_t *prv_cfg)
{
        qm_dma_transfer_type_t transfer_type;
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        if (0 == (chan_reg->ctrl_low & (QM_DMA_CTL_L_LLP_SRC_EN_MASK |
                                        QM_DMA_CTL_L_LLP_DST_EN_MASK))) {
                /* Block chaining disabled. */
                if (0 ==
                    (chan_reg->cfg_low & (QM_DMA_CFG_L_RELOAD_SRC_MASK |
                                          QM_DMA_CFG_L_RELOAD_DST_MASK))) {
                        /* Single block transfer. */
                        transfer_type = QM_DMA_TYPE_SINGLE;
                } else {
                        /* Contiguous multiblock. */
                        transfer_type = QM_DMA_TYPE_MULTI_CONT;
                }
        } else {
                /* LLP enabled, linked list multiblock. */
                transfer_type = (prv_cfg->transfer_type_ll_circular)
                                    ? QM_DMA_TYPE_MULTI_LL_CIRCULAR
                                    : QM_DMA_TYPE_MULTI_LL;
        }

        return transfer_type;
}

static __inline__ void
dma_set_source_transfer_width(const qm_dma_t dma,
                              const qm_dma_channel_id_t channel_id,
                              const qm_dma_transfer_width_t transfer_width)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        UPDATE_REG_BITS(chan_reg->ctrl_low, transfer_width,
                        QM_DMA_CTL_L_SRC_TR_WIDTH_OFFSET,
                        QM_DMA_CTL_L_SRC_TR_WIDTH_MASK);
}

static __inline__ void
dma_set_destination_transfer_width(const qm_dma_t dma,
                                   const qm_dma_channel_id_t channel_id,
                                   const qm_dma_transfer_width_t transfer_width)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        UPDATE_REG_BITS(chan_reg->ctrl_low, transfer_width,
                        QM_DMA_CTL_L_DST_TR_WIDTH_OFFSET,
                        QM_DMA_CTL_L_DST_TR_WIDTH_MASK);
}

static __inline__ void
dma_set_source_burst_length(const qm_dma_t dma,
                            const qm_dma_channel_id_t channel_id,
                            const qm_dma_burst_length_t burst_length)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        UPDATE_REG_BITS(chan_reg->ctrl_low, burst_length,
                        QM_DMA_CTL_L_SRC_MSIZE_OFFSET,
                        QM_DMA_CTL_L_SRC_MSIZE_MASK);
}

static __inline__ void
dma_set_destination_burst_length(const qm_dma_t dma,
                                 const qm_dma_channel_id_t channel_id,
                                 const qm_dma_burst_length_t burst_length)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        UPDATE_REG_BITS(chan_reg->ctrl_low, burst_length,
                        QM_DMA_CTL_L_DEST_MSIZE_OFFSET,
                        QM_DMA_CTL_L_DEST_MSIZE_MASK);
}

static __inline__ void
dma_set_transfer_direction(const qm_dma_t dma,
                           const qm_dma_channel_id_t channel_id,
                           const qm_dma_channel_direction_t transfer_direction)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        UPDATE_REG_BITS(chan_reg->ctrl_low, transfer_direction,
                        QM_DMA_CTL_L_TT_FC_OFFSET, QM_DMA_CTL_L_TT_FC_MASK);
}

static __inline__ void
dma_set_source_increment(const qm_dma_t dma,
                         const qm_dma_channel_id_t channel_id,
                         const qm_dma_address_increment_t address_increment)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        UPDATE_REG_BITS(chan_reg->ctrl_low, address_increment,
                        QM_DMA_CTL_L_SINC_OFFSET, QM_DMA_CTL_L_SINC_MASK);
}

static __inline__ void dma_set_destination_increment(
    const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
    const qm_dma_address_increment_t address_increment)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        UPDATE_REG_BITS(chan_reg->ctrl_low, address_increment,
                        QM_DMA_CTL_L_DINC_OFFSET, QM_DMA_CTL_L_DINC_MASK);
}

static __inline__ void dma_set_handshake_interface(
    const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
    const qm_dma_handshake_interface_t handshake_interface)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        UPDATE_REG_BITS(chan_reg->cfg_high, handshake_interface,
                        QM_DMA_CFG_H_SRC_PER_OFFSET, QM_DMA_CFG_H_SRC_PER_MASK);

        UPDATE_REG_BITS(chan_reg->cfg_high, handshake_interface,
                        QM_DMA_CFG_H_DEST_PER_OFFSET,
                        QM_DMA_CFG_H_DEST_PER_MASK);
}

static __inline__ void
dma_set_handshake_type(const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
                       const uint8_t handshake_type)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        UPDATE_REG_BITS(chan_reg->cfg_low, handshake_type,
                        QM_DMA_CFG_L_HS_SEL_SRC_OFFSET,
                        QM_DMA_CFG_L_HS_SEL_SRC_MASK);

        UPDATE_REG_BITS(chan_reg->cfg_low, handshake_type,
                        QM_DMA_CFG_L_HS_SEL_DST_OFFSET,
                        QM_DMA_CFG_L_HS_SEL_DST_MASK);
}

static __inline__ void
dma_set_handshake_polarity(const qm_dma_t dma,
                           const qm_dma_channel_id_t channel_id,
                           const qm_dma_handshake_polarity_t handshake_polarity)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        UPDATE_REG_BITS(chan_reg->cfg_low, handshake_polarity,
                        QM_DMA_CFG_L_SRC_HS_POL_OFFSET,
                        QM_DMA_CFG_L_SRC_HS_POL_MASK);

        UPDATE_REG_BITS(chan_reg->cfg_low, handshake_polarity,
                        QM_DMA_CFG_L_DST_HS_POL_OFFSET,
                        QM_DMA_CFG_L_DST_HS_POL_MASK);
}

static __inline__ void
dma_set_source_address(const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
                       const uint32_t source_address)
{
        QM_DMA[dma]->chan_reg[channel_id].sar_low = source_address;
}

static __inline__ void
dma_set_destination_address(const qm_dma_t dma,
                            const qm_dma_channel_id_t channel_id,
                            const uint32_t destination_address)
{
        QM_DMA[dma]->chan_reg[channel_id].dar_low = destination_address;
}

static __inline__ void dma_set_block_size(const qm_dma_t dma,
                                          const qm_dma_channel_id_t channel_id,
                                          const uint32_t block_size)
{
        volatile qm_dma_chan_reg_t *chan_reg =
            &QM_DMA[dma]->chan_reg[channel_id];

        UPDATE_REG_BITS(chan_reg->ctrl_high, block_size,
                        QM_DMA_CTL_H_BLOCK_TS_OFFSET,
                        QM_DMA_CTL_H_BLOCK_TS_MASK);
}
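
/*
 * Illustrative configuration sequence (a sketch, not part of the driver):
 * a single-block, memory-to-memory transfer of 64 32-bit items (256 bytes)
 * on controller 0, channel 0. Enum values such as QM_DMA_0,
 * QM_DMA_CHANNEL_0 and QM_DMA_TRANS_WIDTH_32 are assumed from qm_dma.h and
 * qm_soc_regs.h; src and dst are word-aligned buffers.
 *
 *   dma_controller_enable(QM_DMA_0);
 *   dma_set_transfer_type(QM_DMA_0, QM_DMA_CHANNEL_0, QM_DMA_TYPE_SINGLE,
 *                         QM_DMA_MEMORY_TO_MEMORY);
 *   dma_set_transfer_direction(QM_DMA_0, QM_DMA_CHANNEL_0,
 *                              QM_DMA_MEMORY_TO_MEMORY);
 *   dma_set_source_transfer_width(QM_DMA_0, QM_DMA_CHANNEL_0,
 *                                 QM_DMA_TRANS_WIDTH_32);
 *   dma_set_destination_transfer_width(QM_DMA_0, QM_DMA_CHANNEL_0,
 *                                      QM_DMA_TRANS_WIDTH_32);
 *   dma_set_source_increment(QM_DMA_0, QM_DMA_CHANNEL_0,
 *                            QM_DMA_ADDRESS_INCREMENT);
 *   dma_set_destination_increment(QM_DMA_0, QM_DMA_CHANNEL_0,
 *                                 QM_DMA_ADDRESS_INCREMENT);
 *   dma_set_source_address(QM_DMA_0, QM_DMA_CHANNEL_0, (uint32_t)src);
 *   dma_set_destination_address(QM_DMA_0, QM_DMA_CHANNEL_0, (uint32_t)dst);
 *   dma_set_block_size(QM_DMA_0, QM_DMA_CHANNEL_0, 64);
 *   dma_interrupt_enable(QM_DMA_0, QM_DMA_CHANNEL_0);
 *   dma_channel_enable(QM_DMA_0, QM_DMA_CHANNEL_0);
 */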

#endif /* __DMA_H_ */