GitHub Repository: torvalds/linux
Path: blob/master/drivers/dma/at_xdmac.c
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
4
*
5
* Copyright (C) 2014 Atmel Corporation
6
*
7
* Author: Ludovic Desroches <[email protected]>
8
*/
9
10
#include <asm/barrier.h>
11
#include <dt-bindings/dma/at91.h>
12
#include <linux/clk.h>
13
#include <linux/dmaengine.h>
14
#include <linux/dmapool.h>
15
#include <linux/interrupt.h>
16
#include <linux/irq.h>
17
#include <linux/kernel.h>
18
#include <linux/list.h>
19
#include <linux/module.h>
20
#include <linux/of_dma.h>
21
#include <linux/of_platform.h>
22
#include <linux/platform_device.h>
23
#include <linux/pm.h>
24
#include <linux/pm_runtime.h>
25
26
#include "dmaengine.h"
27
28
/* Global registers */
29
#define AT_XDMAC_GTYPE 0x00 /* Global Type Register */
30
#define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels Minus One */
31
#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */
32
#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */
33
#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */
34
#define AT_XDMAC_WRHP(i) (((i) & 0xF) << 4)
35
#define AT_XDMAC_WRMP(i) (((i) & 0xF) << 8)
36
#define AT_XDMAC_WRLP(i) (((i) & 0xF) << 12)
37
#define AT_XDMAC_RDHP(i) (((i) & 0xF) << 16)
38
#define AT_XDMAC_RDMP(i) (((i) & 0xF) << 20)
39
#define AT_XDMAC_RDLP(i) (((i) & 0xF) << 24)
40
#define AT_XDMAC_RDSG(i) (((i) & 0xF) << 28)
41
#define AT_XDMAC_GCFG_M2M (AT_XDMAC_RDLP(0xF) | AT_XDMAC_WRLP(0xF))
42
#define AT_XDMAC_GCFG_P2M (AT_XDMAC_RDSG(0x1) | AT_XDMAC_RDHP(0x3) | \
43
AT_XDMAC_WRHP(0x5))
44
#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */
45
#define AT_XDMAC_PW0(i) (((i) & 0xF) << 0)
46
#define AT_XDMAC_PW1(i) (((i) & 0xF) << 4)
47
#define AT_XDMAC_PW2(i) (((i) & 0xF) << 8)
48
#define AT_XDMAC_PW3(i) (((i) & 0xF) << 12)
49
#define AT_XDMAC_GWAC_M2M 0
50
#define AT_XDMAC_GWAC_P2M (AT_XDMAC_PW0(0xF) | AT_XDMAC_PW2(0xF))
51
52
#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */
53
#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */
54
#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
55
#define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */
56
#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
57
#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
58
#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
59
#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */
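/*
 * Illustrative example (hypothetical register value): with AT_XDMAC_GTYPE
 * reading 0x0031400f, the field helpers above give AT_XDMAC_NB_CH() = 16
 * channels, AT_XDMAC_FIFO_SZ() = 512 bytes of FIFO and AT_XDMAC_NB_REQ() =
 * 50 peripheral request lines.
 */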
60
61
/* Channel relative registers offsets */
62
#define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
63
#define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */
64
#define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */
65
#define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */
66
#define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */
67
#define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */
68
#define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */
69
#define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */
70
#define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
71
#define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */
72
#define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */
73
#define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */
74
#define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */
75
#define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */
76
#define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */
77
#define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */
78
#define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
79
#define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
80
#define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
81
#define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
82
#define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
83
#define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
84
#define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
85
#define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
86
#define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
87
#define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */
88
#define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */
89
#define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */
90
#define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */
91
#define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */
92
#define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */
93
#define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */
94
#define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
95
#define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
96
#define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
97
#define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
98
#define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
99
#define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
100
#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
101
#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
102
#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
103
#define AT_XDMAC_CNDC_NDVIEW_MASK GENMASK(28, 27)
104
#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
105
#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
106
#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
107
#define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
108
#define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
109
#define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
110
#define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
111
#define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
112
#define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */
113
#define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */
114
#define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
115
#define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1)
116
#define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1)
117
#define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1)
118
#define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1)
119
#define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
120
#define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4)
121
#define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4)
122
#define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
123
#define AT_XDMAC_CC_PROT_SEC (0x0 << 5)
124
#define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5)
125
#define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
126
#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
127
#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
128
#define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */
129
#define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7)
130
#define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7)
131
#define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
132
#define AT_XDMAC_CC_DWIDTH_OFFSET 11
133
#define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
134
#define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
135
#define AT_XDMAC_CC_DWIDTH_BYTE 0x0
136
#define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1
137
#define AT_XDMAC_CC_DWIDTH_WORD 0x2
138
#define AT_XDMAC_CC_DWIDTH_DWORD 0x3
139
#define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
140
#define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
141
#define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
142
#define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16)
143
#define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16)
144
#define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16)
145
#define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16)
146
#define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
147
#define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18)
148
#define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18)
149
#define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18)
150
#define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18)
151
#define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
152
#define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21)
153
#define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21)
154
#define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */
155
#define AT_XDMAC_CC_RDIP_DONE (0x0 << 22)
156
#define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22)
157
#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
158
#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
159
#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
160
#define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */
161
#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
162
#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
163
#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
164
165
/* Microblock control members */
166
#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */
167
#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */
168
#define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */
169
#define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */
170
#define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */
171
#define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */
172
#define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */
173
#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */
174
175
#define AT_XDMAC_MAX_CHAN 0x20
176
#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
177
#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
178
#define AT_XDMAC_RESIDUE_MAX_RETRIES 5
179
180
#define AT_XDMAC_DMA_BUSWIDTHS\
181
(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
182
BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
183
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
184
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
185
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
186
187
enum atc_status {
188
AT_XDMAC_CHAN_IS_CYCLIC = 0,
189
AT_XDMAC_CHAN_IS_PAUSED,
190
AT_XDMAC_CHAN_IS_PAUSED_INTERNAL,
191
};
192
193
struct at_xdmac_layout {
194
/* Global Channel Read Suspend Register */
195
u8 grs;
196
/* Global Write Suspend Register */
197
u8 gws;
198
/* Global Channel Read Write Suspend Register */
199
u8 grws;
200
/* Global Channel Read Write Resume Register */
201
u8 grwr;
202
/* Global Channel Software Request Register */
203
u8 gswr;
204
/* Global channel Software Request Status Register */
205
u8 gsws;
206
/* Global Channel Software Flush Request Register */
207
u8 gswf;
208
/* Channel reg base */
209
u8 chan_cc_reg_base;
210
/* Whether the Source/Destination Interface must be specified */
211
bool sdif;
212
/* AXI queue priority configuration supported */
213
bool axi_config;
214
};
215
216
/* ----- Channels ----- */
217
struct at_xdmac_chan {
218
struct dma_chan chan;
219
void __iomem *ch_regs;
220
u32 mask; /* Channel Mask */
221
u32 cfg; /* Channel Configuration Register */
222
u8 perid; /* Peripheral ID */
223
u8 perif; /* Peripheral Interface */
224
u8 memif; /* Memory Interface */
225
u32 save_cc;
226
u32 save_cim;
227
u32 save_cnda;
228
u32 save_cndc;
229
u32 irq_status;
230
unsigned long status;
231
struct tasklet_struct tasklet;
232
struct dma_slave_config sconfig;
233
234
spinlock_t lock;
235
236
struct list_head xfers_list;
237
struct list_head free_descs_list;
238
};
239
240
241
/* ----- Controller ----- */
242
struct at_xdmac {
243
struct dma_device dma;
244
void __iomem *regs;
245
struct device *dev;
246
int irq;
247
struct clk *clk;
248
u32 save_gim;
249
u32 save_gs;
250
struct dma_pool *at_xdmac_desc_pool;
251
const struct at_xdmac_layout *layout;
252
struct at_xdmac_chan chan[];
253
};
254
255
256
/* ----- Descriptors ----- */
257
258
/* Linked List Descriptor */
259
struct at_xdmac_lld {
260
u32 mbr_nda; /* Next Descriptor Member */
261
u32 mbr_ubc; /* Microblock Control Member */
262
u32 mbr_sa; /* Source Address Member */
263
u32 mbr_da; /* Destination Address Member */
264
u32 mbr_cfg; /* Configuration Register */
265
u32 mbr_bc; /* Block Control Register */
266
u32 mbr_ds; /* Data Stride Register */
267
u32 mbr_sus; /* Source Microblock Stride Register */
268
u32 mbr_dus; /* Destination Microblock Stride Register */
269
};
270
271
/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
272
struct at_xdmac_desc {
273
struct at_xdmac_lld lld;
274
enum dma_transfer_direction direction;
275
struct dma_async_tx_descriptor tx_dma_desc;
276
struct list_head desc_node;
277
/* Following members are only used by the first descriptor */
278
bool active_xfer;
279
unsigned int xfer_size;
280
struct list_head descs_list;
281
struct list_head xfer_node;
282
} __aligned(sizeof(u64));
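/*
 * Note on descriptor views (inferred from the prep routines below): view 1
 * covers mbr_nda, mbr_ubc, mbr_sa and mbr_da (cyclic transfers), view 2
 * additionally fetches mbr_cfg (slave sg and memcpy), and view 3 fetches the
 * whole structure including mbr_bc, mbr_ds and the microblock strides
 * (interleaved and memset transfers).
 */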
283
284
static const struct at_xdmac_layout at_xdmac_sama5d4_layout = {
285
.grs = 0x28,
286
.gws = 0x2C,
287
.grws = 0x30,
288
.grwr = 0x34,
289
.gswr = 0x38,
290
.gsws = 0x3C,
291
.gswf = 0x40,
292
.chan_cc_reg_base = 0x50,
293
.sdif = true,
294
.axi_config = false,
295
};
296
297
static const struct at_xdmac_layout at_xdmac_sama7g5_layout = {
298
.grs = 0x30,
299
.gws = 0x38,
300
.grws = 0x40,
301
.grwr = 0x44,
302
.gswr = 0x48,
303
.gsws = 0x4C,
304
.gswf = 0x50,
305
.chan_cc_reg_base = 0x60,
306
.sdif = false,
307
.axi_config = true,
308
};
309
310
static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
311
{
312
return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40);
313
}
314
315
#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
316
#define at_xdmac_write(atxdmac, reg, value) \
317
writel_relaxed((value), (atxdmac)->regs + (reg))
318
319
#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
320
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
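/*
 * Illustrative sketch (not part of the upstream driver): each channel owns a
 * 0x40-byte register window starting at chan_cc_reg_base, so a hypothetical
 * helper reading a channel's interrupt status could look like this.
 */
static inline u32 at_xdmac_example_chan_irq_status(struct at_xdmac *atxdmac,
						   unsigned int chan_nb)
{
	void __iomem *ch_regs = at_xdmac_chan_reg_base(atxdmac, chan_nb);

	/* AT_XDMAC_CIS is the per-channel Interrupt Status Register. */
	return readl_relaxed(ch_regs + AT_XDMAC_CIS);
}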
321
322
static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
323
{
324
return container_of(dchan, struct at_xdmac_chan, chan);
325
}
326
327
static struct device *chan2dev(struct dma_chan *chan)
328
{
329
return &chan->dev->device;
330
}
331
332
static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
333
{
334
return container_of(ddev, struct at_xdmac, dma);
335
}
336
337
static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
338
{
339
return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
340
}
341
342
static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
343
{
344
return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
345
}
346
347
static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
348
{
349
return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
350
}
351
352
static inline int at_xdmac_chan_is_paused_internal(struct at_xdmac_chan *atchan)
353
{
354
return test_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
355
}
356
357
static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
358
{
359
return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
360
}
361
362
static inline u8 at_xdmac_get_dwidth(u32 cfg)
363
{
364
return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
365
};
366
367
static unsigned int init_nr_desc_per_channel = 64;
368
module_param(init_nr_desc_per_channel, uint, 0644);
369
MODULE_PARM_DESC(init_nr_desc_per_channel,
370
"initial descriptors per channel (default: 64)");
371
372
373
static void at_xdmac_runtime_suspend_descriptors(struct at_xdmac_chan *atchan)
374
{
375
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
376
struct at_xdmac_desc *desc, *_desc;
377
378
list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
379
if (!desc->active_xfer)
380
continue;
381
382
pm_runtime_put_autosuspend(atxdmac->dev);
383
}
384
}
385
386
static int at_xdmac_runtime_resume_descriptors(struct at_xdmac_chan *atchan)
387
{
388
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
389
struct at_xdmac_desc *desc, *_desc;
390
int ret;
391
392
list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
393
if (!desc->active_xfer)
394
continue;
395
396
ret = pm_runtime_resume_and_get(atxdmac->dev);
397
if (ret < 0)
398
return ret;
399
}
400
401
return 0;
402
}
403
404
static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
405
{
406
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
407
int ret;
408
409
ret = pm_runtime_resume_and_get(atxdmac->dev);
410
if (ret < 0)
411
return false;
412
413
ret = !!(at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask);
414
415
pm_runtime_put_autosuspend(atxdmac->dev);
416
417
return ret;
418
}
419
420
static void at_xdmac_off(struct at_xdmac *atxdmac, bool suspend_descriptors)
421
{
422
struct dma_chan *chan, *_chan;
423
struct at_xdmac_chan *atchan;
424
int ret;
425
426
ret = pm_runtime_resume_and_get(atxdmac->dev);
427
if (ret < 0)
428
return;
429
430
at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
431
432
/* Wait until all channels are disabled. */
433
while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
434
cpu_relax();
435
436
at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
437
438
/* Decrement runtime PM ref counter for each active descriptor. */
439
if (!list_empty(&atxdmac->dma.channels) && suspend_descriptors) {
440
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels,
441
device_node) {
442
atchan = to_at_xdmac_chan(chan);
443
at_xdmac_runtime_suspend_descriptors(atchan);
444
}
445
}
446
447
pm_runtime_put_autosuspend(atxdmac->dev);
448
}
449
450
/* Call with lock held. */
451
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
452
struct at_xdmac_desc *first)
453
{
454
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
455
u32 reg;
456
int ret;
457
458
ret = pm_runtime_resume_and_get(atxdmac->dev);
459
if (ret < 0)
460
return;
461
462
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
463
464
/* Set transfer as active to not try to start it again. */
465
first->active_xfer = true;
466
467
/* Tell xdmac where to get the first descriptor. */
468
reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
469
if (atxdmac->layout->sdif)
470
reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);
471
472
at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
473
474
/*
 * When doing a non-cyclic transfer, we need to use next descriptor view
 * 2 (or 3) since some fields of the configuration register depend on the
 * transfer size and the src/dest addresses.
 */
479
if (at_xdmac_chan_is_cyclic(atchan))
480
reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
481
else if ((first->lld.mbr_ubc &
482
AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
483
reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
484
else
485
reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
486
/*
487
* Even if the register will be updated from the configuration in the
488
* descriptor when using view 2 or higher, the PROT bit won't be set
489
* properly. This bit can be modified only by using the channel
490
* configuration register.
491
*/
492
at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
493
494
reg |= AT_XDMAC_CNDC_NDDUP
495
| AT_XDMAC_CNDC_NDSUP
496
| AT_XDMAC_CNDC_NDE;
497
at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);
498
499
dev_vdbg(chan2dev(&atchan->chan),
500
"%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
501
__func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
502
at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
503
at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
504
at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
505
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
506
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
507
508
at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
509
reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
510
/*
511
* Request Overflow Error is only for peripheral synchronized transfers
512
*/
513
if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
514
reg |= AT_XDMAC_CIE_ROIE;
515
516
/*
 * There is no end of list when doing cyclic DMA, so we need to get an
 * interrupt after each period.
 */
520
if (at_xdmac_chan_is_cyclic(atchan))
521
at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
522
reg | AT_XDMAC_CIE_BIE);
523
else
524
at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
525
reg | AT_XDMAC_CIE_LIE);
526
at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
527
dev_vdbg(chan2dev(&atchan->chan),
528
"%s: enable channel (0x%08x)\n", __func__, atchan->mask);
529
wmb();
530
at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
531
532
dev_vdbg(chan2dev(&atchan->chan),
533
"%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
534
__func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
535
at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
536
at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
537
at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
538
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
539
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
540
}
541
542
static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
543
{
544
struct at_xdmac_desc *desc = txd_to_at_desc(tx);
545
struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
546
dma_cookie_t cookie;
547
unsigned long irqflags;
548
549
spin_lock_irqsave(&atchan->lock, irqflags);
550
cookie = dma_cookie_assign(tx);
551
552
list_add_tail(&desc->xfer_node, &atchan->xfers_list);
553
spin_unlock_irqrestore(&atchan->lock, irqflags);
554
555
dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
556
__func__, atchan, desc);
557
558
return cookie;
559
}
560
561
static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
562
gfp_t gfp_flags)
563
{
564
struct at_xdmac_desc *desc;
565
struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
566
dma_addr_t phys;
567
568
desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
569
if (desc) {
570
INIT_LIST_HEAD(&desc->descs_list);
571
dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
572
desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
573
desc->tx_dma_desc.phys = phys;
574
}
575
576
return desc;
577
}
578
579
static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
580
{
581
memset(&desc->lld, 0, sizeof(desc->lld));
582
INIT_LIST_HEAD(&desc->descs_list);
583
desc->direction = DMA_TRANS_NONE;
584
desc->xfer_size = 0;
585
desc->active_xfer = false;
586
}
587
588
/* Call must be protected by lock. */
589
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
590
{
591
struct at_xdmac_desc *desc;
592
593
if (list_empty(&atchan->free_descs_list)) {
594
desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
595
} else {
596
desc = list_first_entry(&atchan->free_descs_list,
597
struct at_xdmac_desc, desc_node);
598
list_del(&desc->desc_node);
599
at_xdmac_init_used_desc(desc);
600
}
601
602
return desc;
603
}
604
605
static void at_xdmac_queue_desc(struct dma_chan *chan,
606
struct at_xdmac_desc *prev,
607
struct at_xdmac_desc *desc)
608
{
609
if (!prev || !desc)
610
return;
611
612
prev->lld.mbr_nda = desc->tx_dma_desc.phys;
613
prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;
614
615
dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
616
__func__, prev, &prev->lld.mbr_nda);
617
}
618
619
static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
620
struct at_xdmac_desc *desc)
621
{
622
if (!desc)
623
return;
624
625
desc->lld.mbr_bc++;
626
627
dev_dbg(chan2dev(chan),
628
"%s: incrementing the block count of the desc 0x%p\n",
629
__func__, desc);
630
}
631
632
static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
633
struct of_dma *of_dma)
634
{
635
struct at_xdmac *atxdmac = of_dma->of_dma_data;
636
struct at_xdmac_chan *atchan;
637
struct dma_chan *chan;
638
struct device *dev = atxdmac->dma.dev;
639
640
if (dma_spec->args_count != 1) {
641
dev_err(dev, "dma phandler args: bad number of args\n");
642
return NULL;
643
}
644
645
chan = dma_get_any_slave_channel(&atxdmac->dma);
646
if (!chan) {
647
dev_err(dev, "can't get a dma channel\n");
648
return NULL;
649
}
650
651
atchan = to_at_xdmac_chan(chan);
652
atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
653
atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
654
atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
655
dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
656
atchan->memif, atchan->perif, atchan->perid);
657
658
return chan;
659
}
660
661
static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
662
enum dma_transfer_direction direction)
663
{
664
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
665
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
666
int csize, dwidth;
667
668
if (direction == DMA_DEV_TO_MEM) {
669
atchan->cfg =
670
AT91_XDMAC_DT_PERID(atchan->perid)
671
| AT_XDMAC_CC_DAM_INCREMENTED_AM
672
| AT_XDMAC_CC_SAM_FIXED_AM
673
| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
674
| AT_XDMAC_CC_DSYNC_PER2MEM
675
| AT_XDMAC_CC_MBSIZE_SIXTEEN
676
| AT_XDMAC_CC_TYPE_PER_TRAN;
677
if (atxdmac->layout->sdif)
678
atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) |
679
AT_XDMAC_CC_SIF(atchan->perif);
680
681
csize = ffs(atchan->sconfig.src_maxburst) - 1;
682
if (csize < 0) {
683
dev_err(chan2dev(chan), "invalid src maxburst value\n");
684
return -EINVAL;
685
}
686
atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
687
dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
688
if (dwidth < 0) {
689
dev_err(chan2dev(chan), "invalid src addr width value\n");
690
return -EINVAL;
691
}
692
atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
693
} else if (direction == DMA_MEM_TO_DEV) {
694
atchan->cfg =
695
AT91_XDMAC_DT_PERID(atchan->perid)
696
| AT_XDMAC_CC_DAM_FIXED_AM
697
| AT_XDMAC_CC_SAM_INCREMENTED_AM
698
| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
699
| AT_XDMAC_CC_DSYNC_MEM2PER
700
| AT_XDMAC_CC_MBSIZE_SIXTEEN
701
| AT_XDMAC_CC_TYPE_PER_TRAN;
702
if (atxdmac->layout->sdif)
703
atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) |
704
AT_XDMAC_CC_SIF(atchan->memif);
705
706
csize = ffs(atchan->sconfig.dst_maxburst) - 1;
707
if (csize < 0) {
708
dev_err(chan2dev(chan), "invalid src maxburst value\n");
709
return -EINVAL;
710
}
711
atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
712
dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
713
if (dwidth < 0) {
714
dev_err(chan2dev(chan), "invalid dst addr width value\n");
715
return -EINVAL;
716
}
717
atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
718
}
719
720
dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
721
722
return 0;
723
}
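/*
 * Illustrative example (hypothetical values): with src_addr_width =
 * DMA_SLAVE_BUSWIDTH_4_BYTES and src_maxburst = 16, the computation above
 * yields dwidth = ffs(4) - 1 = 2 (word transfers) and csize = ffs(16) - 1 = 4,
 * i.e. AT_XDMAC_CC_CSIZE(4), a 16-data chunk.
 */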
724
725
/*
 * Only check that the maxburst and addr width values are supported by
 * the controller, not that the configuration is suitable for performing
 * the transfer, since we don't know the direction at this stage.
 */
730
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
731
{
732
if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
733
|| (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
734
return -EINVAL;
735
736
if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
737
|| (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
738
return -EINVAL;
739
740
return 0;
741
}
742
743
static int at_xdmac_set_slave_config(struct dma_chan *chan,
744
struct dma_slave_config *sconfig)
745
{
746
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
747
748
if (at_xdmac_check_slave_config(sconfig)) {
749
dev_err(chan2dev(chan), "invalid slave configuration\n");
750
return -EINVAL;
751
}
752
753
memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
754
755
return 0;
756
}
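/*
 * Illustrative client-side sketch (hypothetical peripheral FIFO address and
 * values, not part of this driver): a minimal dma_slave_config that passes
 * the checks above for a device-to-memory channel.
 */
static int at_xdmac_example_slave_setup(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= 0xf801c018,	/* hypothetical RX holding register */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 1,
	};

	/* Ends up in at_xdmac_set_slave_config() through the dmaengine core. */
	return dmaengine_slave_config(chan, &cfg);
}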
757
758
static struct dma_async_tx_descriptor *
759
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
760
unsigned int sg_len, enum dma_transfer_direction direction,
761
unsigned long flags, void *context)
762
{
763
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
764
struct at_xdmac_desc *first = NULL, *prev = NULL;
765
struct scatterlist *sg;
766
int i;
767
unsigned int xfer_size = 0;
768
unsigned long irqflags;
769
struct dma_async_tx_descriptor *ret = NULL;
770
771
if (!sgl)
772
return NULL;
773
774
if (!is_slave_direction(direction)) {
775
dev_err(chan2dev(chan), "invalid DMA direction\n");
776
return NULL;
777
}
778
779
dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
780
__func__, sg_len,
781
direction == DMA_MEM_TO_DEV ? "to device" : "from device",
782
flags);
783
784
/* Protect dma_sconfig field that can be modified by set_slave_conf. */
785
spin_lock_irqsave(&atchan->lock, irqflags);
786
787
if (at_xdmac_compute_chan_conf(chan, direction))
788
goto spin_unlock;
789
790
/* Prepare descriptors. */
791
for_each_sg(sgl, sg, sg_len, i) {
792
struct at_xdmac_desc *desc = NULL;
793
u32 len, mem, dwidth, fixed_dwidth;
794
795
len = sg_dma_len(sg);
796
mem = sg_dma_address(sg);
797
if (unlikely(!len)) {
798
dev_err(chan2dev(chan), "sg data length is zero\n");
799
goto spin_unlock;
800
}
801
dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
802
__func__, i, len, mem);
803
804
desc = at_xdmac_get_desc(atchan);
805
if (!desc) {
806
dev_err(chan2dev(chan), "can't get descriptor\n");
807
if (first)
808
list_splice_tail_init(&first->descs_list,
809
&atchan->free_descs_list);
810
goto spin_unlock;
811
}
812
813
/* Linked list descriptor setup. */
814
if (direction == DMA_DEV_TO_MEM) {
815
desc->lld.mbr_sa = atchan->sconfig.src_addr;
816
desc->lld.mbr_da = mem;
817
} else {
818
desc->lld.mbr_sa = mem;
819
desc->lld.mbr_da = atchan->sconfig.dst_addr;
820
}
821
dwidth = at_xdmac_get_dwidth(atchan->cfg);
822
fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
823
? dwidth
824
: AT_XDMAC_CC_DWIDTH_BYTE;
825
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
826
| AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
827
| AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
828
| (len >> fixed_dwidth); /* microblock length */
829
desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
830
AT_XDMAC_CC_DWIDTH(fixed_dwidth);
831
dev_dbg(chan2dev(chan),
832
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
833
__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
834
835
/* Chain lld. */
836
if (prev)
837
at_xdmac_queue_desc(chan, prev, desc);
838
839
prev = desc;
840
if (!first)
841
first = desc;
842
843
dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
844
__func__, desc, first);
845
list_add_tail(&desc->desc_node, &first->descs_list);
846
xfer_size += len;
847
}
848
849
850
first->tx_dma_desc.flags = flags;
851
first->xfer_size = xfer_size;
852
first->direction = direction;
853
ret = &first->tx_dma_desc;
854
855
spin_unlock:
856
spin_unlock_irqrestore(&atchan->lock, irqflags);
857
return ret;
858
}
859
860
static struct dma_async_tx_descriptor *
861
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
862
size_t buf_len, size_t period_len,
863
enum dma_transfer_direction direction,
864
unsigned long flags)
865
{
866
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
867
struct at_xdmac_desc *first = NULL, *prev = NULL;
868
unsigned int periods = buf_len / period_len;
869
int i;
870
unsigned long irqflags;
871
872
dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
873
__func__, &buf_addr, buf_len, period_len,
874
direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);
875
876
if (!is_slave_direction(direction)) {
877
dev_err(chan2dev(chan), "invalid DMA direction\n");
878
return NULL;
879
}
880
881
if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
882
dev_err(chan2dev(chan), "channel currently used\n");
883
return NULL;
884
}
885
886
if (at_xdmac_compute_chan_conf(chan, direction))
887
return NULL;
888
889
for (i = 0; i < periods; i++) {
890
struct at_xdmac_desc *desc = NULL;
891
892
spin_lock_irqsave(&atchan->lock, irqflags);
893
desc = at_xdmac_get_desc(atchan);
894
if (!desc) {
895
dev_err(chan2dev(chan), "can't get descriptor\n");
896
if (first)
897
list_splice_tail_init(&first->descs_list,
898
&atchan->free_descs_list);
899
spin_unlock_irqrestore(&atchan->lock, irqflags);
900
return NULL;
901
}
902
spin_unlock_irqrestore(&atchan->lock, irqflags);
903
dev_dbg(chan2dev(chan),
904
"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
905
__func__, desc, &desc->tx_dma_desc.phys);
906
907
if (direction == DMA_DEV_TO_MEM) {
908
desc->lld.mbr_sa = atchan->sconfig.src_addr;
909
desc->lld.mbr_da = buf_addr + i * period_len;
910
} else {
911
desc->lld.mbr_sa = buf_addr + i * period_len;
912
desc->lld.mbr_da = atchan->sconfig.dst_addr;
913
}
914
desc->lld.mbr_cfg = atchan->cfg;
915
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
916
| AT_XDMAC_MBR_UBC_NDEN
917
| AT_XDMAC_MBR_UBC_NSEN
918
| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
919
920
dev_dbg(chan2dev(chan),
921
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
922
__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
923
924
/* Chain lld. */
925
if (prev)
926
at_xdmac_queue_desc(chan, prev, desc);
927
928
prev = desc;
929
if (!first)
930
first = desc;
931
932
dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
933
__func__, desc, first);
934
list_add_tail(&desc->desc_node, &first->descs_list);
935
}
936
937
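/* Chain the last period back to the first one to make the list cyclic. */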
at_xdmac_queue_desc(chan, prev, first);
938
first->tx_dma_desc.flags = flags;
939
first->xfer_size = buf_len;
940
first->direction = direction;
941
942
return &first->tx_dma_desc;
943
}
944
945
static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
946
{
947
u32 width;
948
949
/*
950
* Check address alignment to select the greater data width we
951
* can use.
952
*
953
* Some XDMAC implementations don't provide dword transfer, in
954
* this case selecting dword has the same behavior as
955
* selecting word transfers.
956
*/
957
if (!(addr & 7)) {
958
width = AT_XDMAC_CC_DWIDTH_DWORD;
959
dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
960
} else if (!(addr & 3)) {
961
width = AT_XDMAC_CC_DWIDTH_WORD;
962
dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
963
} else if (!(addr & 1)) {
964
width = AT_XDMAC_CC_DWIDTH_HALFWORD;
965
dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
966
} else {
967
width = AT_XDMAC_CC_DWIDTH_BYTE;
968
dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
969
}
970
971
return width;
972
}
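/*
 * Illustrative example (hypothetical addresses): at_xdmac_align_width(chan,
 * src | dst) with src = 0x20000004 and dst = 0x20001008 evaluates 0x2000100c,
 * whose bit 2 is set but bits 0-1 are clear, so only 4-byte alignment holds
 * and AT_XDMAC_CC_DWIDTH_WORD is returned.
 */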
973
974
static struct at_xdmac_desc *
975
at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
976
struct at_xdmac_chan *atchan,
977
struct at_xdmac_desc *prev,
978
dma_addr_t src, dma_addr_t dst,
979
struct dma_interleaved_template *xt,
980
struct data_chunk *chunk)
981
{
982
struct at_xdmac_desc *desc;
983
u32 dwidth;
984
unsigned long flags;
985
size_t ublen;
986
/*
 * WARNING: The channel configuration is set here since there is no
 * dmaengine_slave_config call in this case. Moreover, we don't know the
 * direction, which means we can't dynamically set the source and dest
 * interfaces, so we have to use the same one for both. Only interface 0
 * allows EBI access. Fortunately, we can access DDR through both ports
 * (at least on SAMA5D4x), so using the same interface for source and
 * dest works around the fact that we don't know the direction.
 * ERRATA: Even though it is meaningless for memory transfers, the PERID
 * must not match the one of another channel, otherwise it could lead to
 * spurious flag status.
 * For the SAMA7G5x case, the SIF and DIF fields are no longer used, so
 * there is no need to set the SIF/DIF interfaces here.
 * For SAMA5D4x and SAMA5D2x, the SIF and DIF are already configured as
 * zero.
 */
1002
u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
1003
| AT_XDMAC_CC_MBSIZE_SIXTEEN
1004
| AT_XDMAC_CC_TYPE_MEM_TRAN;
1005
1006
dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
1007
if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
1008
dev_dbg(chan2dev(chan),
1009
"%s: chunk too big (%zu, max size %lu)...\n",
1010
__func__, chunk->size,
1011
AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
1012
return NULL;
1013
}
1014
1015
if (prev)
1016
dev_dbg(chan2dev(chan),
1017
"Adding items at the end of desc 0x%p\n", prev);
1018
1019
if (xt->src_inc) {
1020
if (xt->src_sgl)
1021
chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
1022
else
1023
chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
1024
}
1025
1026
if (xt->dst_inc) {
1027
if (xt->dst_sgl)
1028
chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
1029
else
1030
chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
1031
}
1032
1033
spin_lock_irqsave(&atchan->lock, flags);
1034
desc = at_xdmac_get_desc(atchan);
1035
spin_unlock_irqrestore(&atchan->lock, flags);
1036
if (!desc) {
1037
dev_err(chan2dev(chan), "can't get descriptor\n");
1038
return NULL;
1039
}
1040
1041
chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1042
1043
ublen = chunk->size >> dwidth;
1044
1045
desc->lld.mbr_sa = src;
1046
desc->lld.mbr_da = dst;
1047
desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
1048
desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);
1049
1050
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
1051
| AT_XDMAC_MBR_UBC_NDEN
1052
| AT_XDMAC_MBR_UBC_NSEN
1053
| ublen;
1054
desc->lld.mbr_cfg = chan_cc;
1055
1056
dev_dbg(chan2dev(chan),
1057
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1058
__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
1059
desc->lld.mbr_ubc, desc->lld.mbr_cfg);
1060
1061
/* Chain lld. */
1062
if (prev)
1063
at_xdmac_queue_desc(chan, prev, desc);
1064
1065
return desc;
1066
}
1067
1068
static struct dma_async_tx_descriptor *
1069
at_xdmac_prep_interleaved(struct dma_chan *chan,
1070
struct dma_interleaved_template *xt,
1071
unsigned long flags)
1072
{
1073
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1074
struct at_xdmac_desc *prev = NULL, *first = NULL;
1075
dma_addr_t dst_addr, src_addr;
1076
size_t src_skip = 0, dst_skip = 0, len = 0;
1077
struct data_chunk *chunk;
1078
int i;
1079
1080
if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
1081
return NULL;
1082
1083
/*
1084
* TODO: Handle the case where we have to repeat a chain of
1085
* descriptors...
1086
*/
1087
if ((xt->numf > 1) && (xt->frame_size > 1))
1088
return NULL;
1089
1090
dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
1091
__func__, &xt->src_start, &xt->dst_start, xt->numf,
1092
xt->frame_size, flags);
1093
1094
src_addr = xt->src_start;
1095
dst_addr = xt->dst_start;
1096
1097
if (xt->numf > 1) {
1098
first = at_xdmac_interleaved_queue_desc(chan, atchan,
1099
NULL,
1100
src_addr, dst_addr,
1101
xt, xt->sgl);
1102
if (!first)
1103
return NULL;
1104
1105
/* Length of the block is (BLEN+1) microblocks. */
1106
for (i = 0; i < xt->numf - 1; i++)
1107
at_xdmac_increment_block_count(chan, first);
1108
1109
dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1110
__func__, first, first);
1111
list_add_tail(&first->desc_node, &first->descs_list);
1112
} else {
1113
for (i = 0; i < xt->frame_size; i++) {
1114
size_t src_icg = 0, dst_icg = 0;
1115
struct at_xdmac_desc *desc;
1116
1117
chunk = xt->sgl + i;
1118
1119
dst_icg = dmaengine_get_dst_icg(xt, chunk);
1120
src_icg = dmaengine_get_src_icg(xt, chunk);
1121
1122
src_skip = chunk->size + src_icg;
1123
dst_skip = chunk->size + dst_icg;
1124
1125
dev_dbg(chan2dev(chan),
1126
"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
1127
__func__, chunk->size, src_icg, dst_icg);
1128
1129
desc = at_xdmac_interleaved_queue_desc(chan, atchan,
1130
prev,
1131
src_addr, dst_addr,
1132
xt, chunk);
1133
if (!desc) {
1134
if (first)
1135
list_splice_tail_init(&first->descs_list,
1136
&atchan->free_descs_list);
1137
return NULL;
1138
}
1139
1140
if (!first)
1141
first = desc;
1142
1143
dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1144
__func__, desc, first);
1145
list_add_tail(&desc->desc_node, &first->descs_list);
1146
1147
if (xt->src_sgl)
1148
src_addr += src_skip;
1149
1150
if (xt->dst_sgl)
1151
dst_addr += dst_skip;
1152
1153
len += chunk->size;
1154
prev = desc;
1155
}
1156
}
1157
1158
first->tx_dma_desc.cookie = -EBUSY;
1159
first->tx_dma_desc.flags = flags;
1160
first->xfer_size = len;
1161
1162
return &first->tx_dma_desc;
1163
}
1164
1165
static struct dma_async_tx_descriptor *
1166
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1167
size_t len, unsigned long flags)
1168
{
1169
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1170
struct at_xdmac_desc *first = NULL, *prev = NULL;
1171
size_t remaining_size = len, xfer_size = 0, ublen;
1172
dma_addr_t src_addr = src, dst_addr = dest;
1173
u32 dwidth;
1174
/*
 * WARNING: We don't know the direction, which means we can't dynamically
 * set the source and dest interfaces, so we have to use the same one for
 * both. Only interface 0 allows EBI access. Fortunately, we can access
 * DDR through both ports (at least on SAMA5D4x), so using the same
 * interface for source and dest works around the fact that we don't
 * know the direction.
 * ERRATA: Even though it is meaningless for memory transfers, the PERID
 * must not match the one of another channel, otherwise it could lead to
 * spurious flag status.
 * For the SAMA7G5x case, the SIF and DIF fields are no longer used, so
 * there is no need to set the SIF/DIF interfaces here.
 * For SAMA5D4x and SAMA5D2x, the SIF and DIF are already configured as
 * zero.
 */
1189
u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
1190
| AT_XDMAC_CC_DAM_INCREMENTED_AM
1191
| AT_XDMAC_CC_SAM_INCREMENTED_AM
1192
| AT_XDMAC_CC_MBSIZE_SIXTEEN
1193
| AT_XDMAC_CC_TYPE_MEM_TRAN;
1194
unsigned long irqflags;
1195
1196
dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
1197
__func__, &src, &dest, len, flags);
1198
1199
if (unlikely(!len))
1200
return NULL;
1201
1202
dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);
1203
1204
/* Prepare descriptors. */
1205
while (remaining_size) {
1206
struct at_xdmac_desc *desc = NULL;
1207
1208
dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
1209
1210
spin_lock_irqsave(&atchan->lock, irqflags);
1211
desc = at_xdmac_get_desc(atchan);
1212
spin_unlock_irqrestore(&atchan->lock, irqflags);
1213
if (!desc) {
1214
dev_err(chan2dev(chan), "can't get descriptor\n");
1215
if (first)
1216
list_splice_tail_init(&first->descs_list,
1217
&atchan->free_descs_list);
1218
return NULL;
1219
}
1220
1221
/* Update src and dest addresses. */
1222
src_addr += xfer_size;
1223
dst_addr += xfer_size;
1224
1225
if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
1226
xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
1227
else
1228
xfer_size = remaining_size;
1229
1230
dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
1231
1232
/* Check remaining length and change data width if needed. */
1233
dwidth = at_xdmac_align_width(chan,
1234
src_addr | dst_addr | xfer_size);
1235
chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
1236
chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1237
1238
ublen = xfer_size >> dwidth;
1239
remaining_size -= xfer_size;
1240
1241
desc->lld.mbr_sa = src_addr;
1242
desc->lld.mbr_da = dst_addr;
1243
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
1244
| AT_XDMAC_MBR_UBC_NDEN
1245
| AT_XDMAC_MBR_UBC_NSEN
1246
| ublen;
1247
desc->lld.mbr_cfg = chan_cc;
1248
1249
dev_dbg(chan2dev(chan),
1250
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1251
__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
1252
1253
/* Chain lld. */
1254
if (prev)
1255
at_xdmac_queue_desc(chan, prev, desc);
1256
1257
prev = desc;
1258
if (!first)
1259
first = desc;
1260
1261
dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1262
__func__, desc, first);
1263
list_add_tail(&desc->desc_node, &first->descs_list);
1264
}
1265
1266
first->tx_dma_desc.flags = flags;
1267
first->xfer_size = len;
1268
1269
return &first->tx_dma_desc;
1270
}
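/*
 * Illustrative client-side sketch (not part of this driver, error paths
 * trimmed): driving the memcpy capability above through the generic
 * dmaengine API.
 */
static int at_xdmac_example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* Busy-waits for completion; real users would rather use a callback. */
	dma_sync_wait(chan, cookie);

	dma_release_channel(chan);
	return 0;
}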
1271
1272
static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
1273
struct at_xdmac_chan *atchan,
1274
dma_addr_t dst_addr,
1275
size_t len,
1276
int value)
1277
{
1278
struct at_xdmac_desc *desc;
1279
unsigned long flags;
1280
size_t ublen;
1281
u32 dwidth;
1282
char pattern;
1283
/*
 * WARNING: The channel configuration is set here since there is no
 * dmaengine_slave_config call in this case. Moreover, we don't know the
 * direction, which means we can't dynamically set the source and dest
 * interfaces, so we have to use the same one for both. Only interface 0
 * allows EBI access. Fortunately, we can access DDR through both ports
 * (at least on SAMA5D4x), so using the same interface for source and
 * dest works around the fact that we don't know the direction.
 * ERRATA: Even though it is meaningless for memory transfers, the PERID
 * must not match the one of another channel, otherwise it could lead to
 * spurious flag status.
 * For the SAMA7G5x case, the SIF and DIF fields are no longer used, so
 * there is no need to set the SIF/DIF interfaces here.
 * For SAMA5D4x and SAMA5D2x, the SIF and DIF are already configured as
 * zero.
 */
1299
u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
1300
| AT_XDMAC_CC_DAM_UBS_AM
1301
| AT_XDMAC_CC_SAM_INCREMENTED_AM
1302
| AT_XDMAC_CC_MBSIZE_SIXTEEN
1303
| AT_XDMAC_CC_MEMSET_HW_MODE
1304
| AT_XDMAC_CC_TYPE_MEM_TRAN;
1305
1306
dwidth = at_xdmac_align_width(chan, dst_addr);
1307
1308
if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
1309
dev_err(chan2dev(chan),
1310
"%s: Transfer too large, aborting...\n",
1311
__func__);
1312
return NULL;
1313
}
1314
1315
spin_lock_irqsave(&atchan->lock, flags);
1316
desc = at_xdmac_get_desc(atchan);
1317
spin_unlock_irqrestore(&atchan->lock, flags);
1318
if (!desc) {
1319
dev_err(chan2dev(chan), "can't get descriptor\n");
1320
return NULL;
1321
}
1322
1323
chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1324
1325
/* Only the first byte of value is to be used according to dmaengine */
1326
pattern = (char)value;
1327
1328
ublen = len >> dwidth;
1329
1330
desc->lld.mbr_da = dst_addr;
1331
desc->lld.mbr_ds = (pattern << 24) |
1332
(pattern << 16) |
1333
(pattern << 8) |
1334
pattern;
1335
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
1336
| AT_XDMAC_MBR_UBC_NDEN
1337
| AT_XDMAC_MBR_UBC_NSEN
1338
| ublen;
1339
desc->lld.mbr_cfg = chan_cc;
1340
1341
dev_dbg(chan2dev(chan),
1342
"%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1343
__func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
1344
desc->lld.mbr_cfg);
1345
1346
return desc;
1347
}
1348
1349
static struct dma_async_tx_descriptor *
1350
at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
1351
size_t len, unsigned long flags)
1352
{
1353
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1354
struct at_xdmac_desc *desc;
1355
1356
dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
1357
__func__, &dest, len, value, flags);
1358
1359
if (unlikely(!len))
1360
return NULL;
1361
1362
desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
1363
if (!desc)
1364
return NULL;
1365
list_add_tail(&desc->desc_node, &desc->descs_list);
1366
1367
desc->tx_dma_desc.cookie = -EBUSY;
1368
desc->tx_dma_desc.flags = flags;
1369
desc->xfer_size = len;
1370
1371
return &desc->tx_dma_desc;
1372
}
1373
1374
static struct dma_async_tx_descriptor *
1375
at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
1376
unsigned int sg_len, int value,
1377
unsigned long flags)
1378
{
1379
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1380
struct at_xdmac_desc *desc, *pdesc = NULL,
1381
*ppdesc = NULL, *first = NULL;
1382
struct scatterlist *sg, *psg = NULL, *ppsg = NULL;
1383
size_t stride = 0, pstride = 0, len = 0;
1384
int i;
1385
1386
if (!sgl)
1387
return NULL;
1388
1389
dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
1390
__func__, sg_len, value, flags);
1391
1392
/* Prepare descriptors. */
1393
for_each_sg(sgl, sg, sg_len, i) {
1394
dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
1395
__func__, &sg_dma_address(sg), sg_dma_len(sg),
1396
value, flags);
1397
desc = at_xdmac_memset_create_desc(chan, atchan,
1398
sg_dma_address(sg),
1399
sg_dma_len(sg),
1400
value);
1401
if (!desc && first)
1402
list_splice_tail_init(&first->descs_list,
1403
&atchan->free_descs_list);
1404
1405
if (!first)
1406
first = desc;
1407
1408
/* Update our strides */
1409
pstride = stride;
1410
if (psg)
1411
stride = sg_dma_address(sg) -
1412
(sg_dma_address(psg) + sg_dma_len(psg));
1413
1414
/*
1415
* The scatterlist API gives us only the address and
1416
* length of each element.
1417
*
1418
* Unfortunately, we don't have the stride, which we
1419
* will need to compute.
1420
*
1421
* That makes us end up in a situation like this one:
1422
* len stride len stride len
1423
* +-------+ +-------+ +-------+
1424
* | N-2 | | N-1 | | N |
1425
* +-------+ +-------+ +-------+
1426
*
1427
* We need all these three elements (N-2, N-1 and N)
1428
* to actually decide whether we need to
1429
* queue N-1 or reuse N-2.
1430
*
1431
* We will only consider N if it is the last element.
1432
*/
1433
if (ppdesc && pdesc) {
1434
if ((stride == pstride) &&
1435
(sg_dma_len(ppsg) == sg_dma_len(psg))) {
1436
dev_dbg(chan2dev(chan),
1437
"%s: desc 0x%p can be merged with desc 0x%p\n",
1438
__func__, pdesc, ppdesc);
1439
1440
/*
1441
* Increment the block count of the
1442
* N-2 descriptor
1443
*/
1444
at_xdmac_increment_block_count(chan, ppdesc);
1445
ppdesc->lld.mbr_dus = stride;
1446
1447
/*
1448
* Put back the N-1 descriptor in the
1449
* free descriptor list
1450
*/
1451
list_add_tail(&pdesc->desc_node,
1452
&atchan->free_descs_list);
1453
1454
/*
1455
* Make our N-1 descriptor pointer
1456
* point to the N-2 since they were
1457
* actually merged.
1458
*/
1459
pdesc = ppdesc;
1460
1461
/*
1462
* Rule out the case where we don't have
1463
* pstride computed yet (our second sg
1464
* element)
1465
*
1466
* We also want to catch the case where there
1467
* would be a negative stride,
1468
*/
1469
} else if (pstride ||
1470
sg_dma_address(sg) < sg_dma_address(psg)) {
1471
/*
1472
* Queue the N-1 descriptor after the
1473
* N-2
1474
*/
1475
at_xdmac_queue_desc(chan, ppdesc, pdesc);
1476
1477
/*
1478
* Add the N-1 descriptor to the list
1479
* of the descriptors used for this
1480
* transfer
1481
*/
1482
list_add_tail(&desc->desc_node,
1483
&first->descs_list);
1484
dev_dbg(chan2dev(chan),
1485
"%s: add desc 0x%p to descs_list 0x%p\n",
1486
__func__, desc, first);
1487
}
1488
}
1489
1490
/*
1491
* If we are the last element, just see if we have the
1492
* same size as the previous element.
1493
*
1494
* If so, we can merge it with the previous descriptor
1495
* since we don't care about the stride anymore.
1496
*/
1497
if ((i == (sg_len - 1)) &&
1498
sg_dma_len(psg) == sg_dma_len(sg)) {
1499
dev_dbg(chan2dev(chan),
1500
"%s: desc 0x%p can be merged with desc 0x%p\n",
1501
__func__, desc, pdesc);
1502
1503
/*
1504
* Increment the block count of the N-1
1505
* descriptor
1506
*/
1507
at_xdmac_increment_block_count(chan, pdesc);
1508
pdesc->lld.mbr_dus = stride;
1509
1510
/*
1511
* Put back the N descriptor in the free
1512
* descriptor list
1513
*/
1514
list_add_tail(&desc->desc_node,
1515
&atchan->free_descs_list);
1516
}
1517
1518
/* Update our descriptors */
1519
ppdesc = pdesc;
1520
pdesc = desc;
1521
1522
/* Update our scatter pointers */
1523
ppsg = psg;
1524
psg = sg;
1525
1526
len += sg_dma_len(sg);
1527
}
1528
1529
first->tx_dma_desc.cookie = -EBUSY;
1530
first->tx_dma_desc.flags = flags;
1531
first->xfer_size = len;
1532
1533
return &first->tx_dma_desc;
1534
}
1535
1536
static enum dma_status
1537
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1538
struct dma_tx_state *txstate)
1539
{
1540
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1541
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1542
struct at_xdmac_desc *desc, *_desc, *iter;
1543
struct list_head *descs_list;
1544
enum dma_status ret;
1545
int residue, retry, pm_status;
1546
u32 cur_nda, check_nda, cur_ubc, mask, value;
1547
u8 dwidth = 0;
1548
unsigned long flags;
1549
bool initd;
1550
1551
ret = dma_cookie_status(chan, cookie, txstate);
1552
if (ret == DMA_COMPLETE || !txstate)
1553
return ret;
1554
1555
pm_status = pm_runtime_resume_and_get(atxdmac->dev);
1556
if (pm_status < 0)
1557
return DMA_ERROR;
1558
1559
spin_lock_irqsave(&atchan->lock, flags);
1560
1561
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
1562
1563
/*
1564
* If the transfer has not been started yet, there is no need to compute
* the residue: it is simply the transfer length.
1566
*/
1567
if (!desc->active_xfer) {
1568
dma_set_residue(txstate, desc->xfer_size);
1569
goto spin_unlock;
1570
}
1571
1572
residue = desc->xfer_size;
1573
/*
1574
* Flush FIFO: only relevant when the transfer is source peripheral
1575
* synchronized. Flush is needed before reading CUBC because data in
1576
* the FIFO are not reported by CUBC. Reporting a residue of the
1577
* transfer length while we still have data in the FIFO can cause issues.
* Use case: the Atmel USART has a timeout which fires when characters
* have been received but no new character arrives for a while. On
* timeout, it requests the residue. If the data are still in the DMA
* FIFO, we will return a residue of the transfer length, which means no
* data was received. If an application is waiting for these data, it
* will hang since we won't get another USART timeout without receiving
* new data.
1585
*/
1586
mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
1587
value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
1588
if ((desc->lld.mbr_cfg & mask) == value) {
1589
at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
1590
while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1591
cpu_relax();
1592
}
1593
1594
/*
1595
* The easiest way to compute the residue should be to pause the DMA
1596
* but doing this can lead to miss some data as some devices don't
1597
* have FIFO.
1598
* We need to read several registers because:
1599
* - DMA is running therefore a descriptor change is possible while
1600
* reading these registers
1601
* - When the block transfer is done, the value of the CUBC register
1602
* is set to its initial value until the fetch of the next descriptor.
1603
* This value will corrupt the residue calculation so we have to skip
1604
* it.
1605
*
1606
* INITD -------- ------------
1607
* |____________________|
1608
* _______________________ _______________
1609
* NDA @desc2 \/ @desc3
1610
* _______________________/\_______________
1611
* __________ ___________ _______________
1612
* CUBC 0 \/ MAX desc1 \/ MAX desc2
1613
* __________/\___________/\_______________
1614
*
1615
* Since descriptors are aligned on 64 bits, we can assume that
1616
* the update of NDA and CUBC is atomic.
1617
* Memory barriers are used to ensure the read order of the registers.
1618
* A max number of retries is set because unlikely it could never ends.
1619
*/
1620
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
1621
check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1622
rmb();
1623
cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1624
rmb();
1625
initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1626
rmb();
1627
cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1628
rmb();
1629
1630
if ((check_nda == cur_nda) && initd)
1631
break;
1632
}
1633
1634
if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
1635
ret = DMA_ERROR;
1636
goto spin_unlock;
1637
}
1638
1639
/*
1640
* Flush FIFO: only relevant when the transfer is source peripheral
1641
* synchronized. Another flush is needed here because CUBC is updated
1642
* when the controller sends the data write command. It can lead to
1643
* reporting data that have not actually been written to memory or to the
* device. The FIFO flush ensures that the data are really written.
1645
*/
1646
if ((desc->lld.mbr_cfg & mask) == value) {
1647
at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
1648
while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1649
cpu_relax();
1650
}
1651
1652
/*
1653
* Subtract the size of all microblocks already transferred, including
1654
* the current one, then add back the remaining size of the current
1655
* microblock.
1656
*/
1657
descs_list = &desc->descs_list;
1658
list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
1659
dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
1660
residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
1661
if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
1662
desc = iter;
1663
break;
1664
}
1665
}
1666
residue += cur_ubc << dwidth;
1667
1668
dma_set_residue(txstate, residue);
1669
1670
dev_dbg(chan2dev(chan),
1671
"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
1672
__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
1673
1674
spin_unlock:
1675
spin_unlock_irqrestore(&atchan->lock, flags);
1676
pm_runtime_put_autosuspend(atxdmac->dev);
1677
return ret;
1678
}
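/*
 * Illustrative sketch, not part of this driver: how a peripheral
 * driver (e.g. a UART on its RX timeout, as in the use case described
 * above) reads back the residue computed here through the generic
 * dmaengine API. The my_uart_port structure and its field names are
 * hypothetical; only the dmaengine calls are real.
 */
#if 0	/* example only, assumes <linux/dmaengine.h> */
struct my_uart_port {
	struct dma_chan	*rx_chan;
	dma_cookie_t	rx_cookie;
	size_t		rx_buf_len;
};

static size_t my_uart_rx_pending(struct my_uart_port *port)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(port->rx_chan, port->rx_cookie, &state);
	if (status == DMA_ERROR)
		return 0;

	/* Bytes already written to memory = total length - residue. */
	return port->rx_buf_len - state.residue;
}
#endif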
1679
1680
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
1681
{
1682
struct at_xdmac_desc *desc;
1683
1684
/*
1685
* If the channel is enabled, do nothing: advance_work will be
1686
* triggered after the interrupt.
1687
*/
1688
if (at_xdmac_chan_is_enabled(atchan) || list_empty(&atchan->xfers_list))
1689
return;
1690
1691
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
1692
xfer_node);
1693
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1694
if (!desc->active_xfer)
1695
at_xdmac_start_xfer(atchan, desc);
1696
}
1697
1698
static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
1699
{
1700
struct at_xdmac_desc *desc;
1701
struct dma_async_tx_descriptor *txd;
1702
1703
spin_lock_irq(&atchan->lock);
1704
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1705
__func__, atchan->irq_status);
1706
if (list_empty(&atchan->xfers_list)) {
1707
spin_unlock_irq(&atchan->lock);
1708
return;
1709
}
1710
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
1711
xfer_node);
1712
spin_unlock_irq(&atchan->lock);
1713
txd = &desc->tx_dma_desc;
1714
if (txd->flags & DMA_PREP_INTERRUPT)
1715
dmaengine_desc_get_callback_invoke(txd, NULL);
1716
}
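/*
 * Illustrative sketch, not part of this driver: a client setting up a
 * cyclic (ring buffer) transfer whose per-period callback is invoked
 * from at_xdmac_handle_cyclic() above. my_audio_period_done() and the
 * buffer parameters are hypothetical; the dmaengine calls are real.
 */
#if 0	/* example only, assumes <linux/dmaengine.h> */
static void my_audio_period_done(void *param)
{
	/* Invoked once per period, from the channel tasklet. */
}

static int my_audio_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				 size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = my_audio_period_done;
	desc->callback_param = NULL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}
#endif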
1717
1718
/* Called with atchan->lock held. */
1719
static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
1720
{
1721
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1722
struct at_xdmac_desc *bad_desc;
1723
int ret;
1724
1725
ret = pm_runtime_resume_and_get(atxdmac->dev);
1726
if (ret < 0)
1727
return;
1728
1729
/*
1730
* The descriptor currently at the head of the active list is
1731
* broken. Since we don't have any way to report errors, we'll
1732
* just have to scream loudly and try to continue with other
1733
* descriptors queued (if any).
1734
*/
1735
if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
1736
dev_err(chan2dev(&atchan->chan), "read bus error!!!\n");
1737
if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
1738
dev_err(chan2dev(&atchan->chan), "write bus error!!!\n");
1739
if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
1740
dev_err(chan2dev(&atchan->chan), "request overflow error!!!\n");
1741
1742
/* Channel must be disabled first as it's not done automatically */
1743
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1744
while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1745
cpu_relax();
1746
1747
bad_desc = list_first_entry(&atchan->xfers_list,
1748
struct at_xdmac_desc,
1749
xfer_node);
1750
1751
/* Print bad descriptor's details if needed */
1752
dev_dbg(chan2dev(&atchan->chan),
1753
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
1754
__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
1755
bad_desc->lld.mbr_ubc);
1756
1757
pm_runtime_put_autosuspend(atxdmac->dev);
1758
1759
/* Then continue with usual descriptor management */
1760
}
1761
1762
static void at_xdmac_tasklet(struct tasklet_struct *t)
1763
{
1764
struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
1765
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1766
struct at_xdmac_desc *desc;
1767
struct dma_async_tx_descriptor *txd;
1768
u32 error_mask;
1769
1770
if (at_xdmac_chan_is_cyclic(atchan))
1771
return at_xdmac_handle_cyclic(atchan);
1772
1773
error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
1774
AT_XDMAC_CIS_ROIS;
1775
1776
spin_lock_irq(&atchan->lock);
1777
1778
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1779
__func__, atchan->irq_status);
1780
1781
if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
1782
!(atchan->irq_status & error_mask)) {
1783
spin_unlock_irq(&atchan->lock);
1784
return;
1785
}
1786
1787
if (atchan->irq_status & error_mask)
1788
at_xdmac_handle_error(atchan);
1789
1790
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
1791
xfer_node);
1792
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1793
if (!desc->active_xfer) {
1794
dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting\n");
1795
spin_unlock_irq(&atchan->lock);
1796
return;
1797
}
1798
1799
txd = &desc->tx_dma_desc;
1800
dma_cookie_complete(txd);
1801
/* Remove the transfer from the transfer list. */
1802
list_del(&desc->xfer_node);
1803
spin_unlock_irq(&atchan->lock);
1804
1805
if (txd->flags & DMA_PREP_INTERRUPT)
1806
dmaengine_desc_get_callback_invoke(txd, NULL);
1807
1808
dma_run_dependencies(txd);
1809
1810
spin_lock_irq(&atchan->lock);
1811
/* Move the xfer descriptors into the free descriptors list. */
1812
list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
1813
at_xdmac_advance_work(atchan);
1814
spin_unlock_irq(&atchan->lock);
1815
1816
/*
1817
* Decrement runtime PM ref counter incremented in
1818
* at_xdmac_start_xfer().
1819
*/
1820
pm_runtime_put_autosuspend(atxdmac->dev);
1821
}
1822
1823
static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1824
{
1825
struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
1826
struct at_xdmac_chan *atchan;
1827
u32 imr, status, pending;
1828
u32 chan_imr, chan_status;
1829
int i, ret = IRQ_NONE;
1830
1831
do {
1832
imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1833
status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
1834
pending = status & imr;
1835
1836
dev_vdbg(atxdmac->dma.dev,
1837
"%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
1838
__func__, status, imr, pending);
1839
1840
if (!pending)
1841
break;
1842
1843
/* We have to find which channel has generated the interrupt. */
1844
for (i = 0; i < atxdmac->dma.chancnt; i++) {
1845
if (!((1 << i) & pending))
1846
continue;
1847
1848
atchan = &atxdmac->chan[i];
1849
chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1850
chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
1851
atchan->irq_status = chan_status & chan_imr;
1852
dev_vdbg(atxdmac->dma.dev,
1853
"%s: chan%d: imr=0x%x, status=0x%x\n",
1854
__func__, i, chan_imr, chan_status);
1855
dev_vdbg(chan2dev(&atchan->chan),
1856
"%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
1857
__func__,
1858
at_xdmac_chan_read(atchan, AT_XDMAC_CC),
1859
at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
1860
at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
1861
at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
1862
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1863
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1864
1865
if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1866
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1867
1868
tasklet_schedule(&atchan->tasklet);
1869
ret = IRQ_HANDLED;
1870
}
1871
1872
} while (pending);
1873
1874
return ret;
1875
}
1876
1877
static void at_xdmac_issue_pending(struct dma_chan *chan)
1878
{
1879
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1880
unsigned long flags;
1881
1882
dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
1883
1884
spin_lock_irqsave(&atchan->lock, flags);
1885
at_xdmac_advance_work(atchan);
1886
spin_unlock_irqrestore(&atchan->lock, flags);
1887
1888
return;
1889
}
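/*
 * Illustrative sketch, not part of this driver: the usual client flow
 * that ends up in at_xdmac_issue_pending() above: prepare a slave
 * descriptor, submit it, then issue the pending queue.
 * my_dev_start_tx() and its parameters are hypothetical; the dmaengine
 * calls are real.
 */
#if 0	/* example only, assumes <linux/dmaengine.h> */
static int my_dev_start_tx(struct dma_chan *chan, dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Nothing moves until the pending queue is issued. */
	dma_async_issue_pending(chan);

	return 0;
}
#endif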
1890
1891
static int at_xdmac_device_config(struct dma_chan *chan,
1892
struct dma_slave_config *config)
1893
{
1894
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1895
int ret;
1896
unsigned long flags;
1897
1898
dev_dbg(chan2dev(chan), "%s\n", __func__);
1899
1900
spin_lock_irqsave(&atchan->lock, flags);
1901
ret = at_xdmac_set_slave_config(chan, config);
1902
spin_unlock_irqrestore(&atchan->lock, flags);
1903
1904
return ret;
1905
}
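/*
 * Illustrative sketch, not part of this driver: how a client fills a
 * struct dma_slave_config before the call reaches
 * at_xdmac_device_config() above. The FIFO address, bus width and
 * burst size are made-up example values; the API itself is the generic
 * dmaengine one.
 */
#if 0	/* example only, assumes <linux/dmaengine.h> */
static int my_dev_config_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 1,
	};

	return dmaengine_slave_config(chan, &cfg);
}
#endif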
1906
1907
static void at_xdmac_device_pause_set(struct at_xdmac *atxdmac,
1908
struct at_xdmac_chan *atchan)
1909
{
1910
at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
1911
while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) &
1912
(AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
1913
cpu_relax();
1914
}
1915
1916
static void at_xdmac_device_pause_internal(struct at_xdmac_chan *atchan)
1917
{
1918
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1919
unsigned long flags;
1920
1921
spin_lock_irqsave(&atchan->lock, flags);
1922
set_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
1923
at_xdmac_device_pause_set(atxdmac, atchan);
1924
spin_unlock_irqrestore(&atchan->lock, flags);
1925
}
1926
1927
static int at_xdmac_device_pause(struct dma_chan *chan)
1928
{
1929
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1930
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1931
unsigned long flags;
1932
int ret;
1933
1934
dev_dbg(chan2dev(chan), "%s\n", __func__);
1935
1936
if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
1937
return 0;
1938
1939
ret = pm_runtime_resume_and_get(atxdmac->dev);
1940
if (ret < 0)
1941
return ret;
1942
1943
spin_lock_irqsave(&atchan->lock, flags);
1944
1945
at_xdmac_device_pause_set(atxdmac, atchan);
1946
/* Decrement runtime PM ref counter for each active descriptor. */
1947
at_xdmac_runtime_suspend_descriptors(atchan);
1948
1949
spin_unlock_irqrestore(&atchan->lock, flags);
1950
1951
pm_runtime_put_autosuspend(atxdmac->dev);
1952
1953
return 0;
1954
}
1955
1956
static void at_xdmac_device_resume_internal(struct at_xdmac_chan *atchan)
1957
{
1958
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1959
unsigned long flags;
1960
1961
spin_lock_irqsave(&atchan->lock, flags);
1962
at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
1963
clear_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
1964
spin_unlock_irqrestore(&atchan->lock, flags);
1965
}
1966
1967
static int at_xdmac_device_resume(struct dma_chan *chan)
1968
{
1969
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1970
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1971
unsigned long flags;
1972
int ret;
1973
1974
dev_dbg(chan2dev(chan), "%s\n", __func__);
1975
1976
ret = pm_runtime_resume_and_get(atxdmac->dev);
1977
if (ret < 0)
1978
return ret;
1979
1980
spin_lock_irqsave(&atchan->lock, flags);
1981
if (!at_xdmac_chan_is_paused(atchan))
1982
goto unlock;
1983
1984
/* Increment runtime PM ref counter for each active descriptor. */
1985
ret = at_xdmac_runtime_resume_descriptors(atchan);
1986
if (ret < 0)
1987
goto unlock;
1988
1989
at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
1990
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1991
1992
unlock:
1993
spin_unlock_irqrestore(&atchan->lock, flags);
1994
pm_runtime_put_autosuspend(atxdmac->dev);
1995
1996
return ret;
1997
}
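/*
 * Illustrative sketch, not part of this driver: a client pausing and
 * later resuming a channel, which lands in at_xdmac_device_pause() and
 * at_xdmac_device_resume() above. my_dev_pause_then_resume() is
 * hypothetical; the two wrappers are the real dmaengine API.
 */
#if 0	/* example only, assumes <linux/dmaengine.h> */
static int my_dev_pause_then_resume(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_pause(chan);	/* descriptors stay queued */
	if (ret)
		return ret;

	/* ... inspect residue, reconfigure the peripheral, ... */

	return dmaengine_resume(chan);
}
#endif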
1998
1999
static int at_xdmac_device_terminate_all(struct dma_chan *chan)
2000
{
2001
struct at_xdmac_desc *desc, *_desc;
2002
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2003
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
2004
unsigned long flags;
2005
int ret;
2006
2007
dev_dbg(chan2dev(chan), "%s\n", __func__);
2008
2009
ret = pm_runtime_resume_and_get(atxdmac->dev);
2010
if (ret < 0)
2011
return ret;
2012
2013
spin_lock_irqsave(&atchan->lock, flags);
2014
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
2015
while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
2016
cpu_relax();
2017
2018
/* Cancel all pending transfers. */
2019
list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
2020
list_del(&desc->xfer_node);
2021
list_splice_tail_init(&desc->descs_list,
2022
&atchan->free_descs_list);
2023
/*
2024
* We incremented the runtime PM reference count on
2025
* at_xdmac_start_xfer() for this descriptor. Now it's time
2026
* to release it.
2027
*/
2028
if (desc->active_xfer)
2029
pm_runtime_put_noidle(atxdmac->dev);
2030
}
2031
2032
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
2033
clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
2034
spin_unlock_irqrestore(&atchan->lock, flags);
2035
2036
pm_runtime_put_autosuspend(atxdmac->dev);
2037
2038
return 0;
2039
}
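/*
 * Illustrative sketch, not part of this driver: aborting everything on
 * a channel from a client. dmaengine_terminate_sync() ends up in
 * at_xdmac_device_terminate_all() above and then waits until no
 * completion callback can still be running; the async variant defers
 * that wait to dmaengine_synchronize(). my_dev_abort_rx() is
 * hypothetical.
 */
#if 0	/* example only, assumes <linux/dmaengine.h> */
static void my_dev_abort_rx(struct dma_chan *chan)
{
	/* Must be called from sleepable context. */
	dmaengine_terminate_sync(chan);
}
#endif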
2040
2041
static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
2042
{
2043
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2044
struct at_xdmac_desc *desc;
2045
int i;
2046
2047
if (at_xdmac_chan_is_enabled(atchan)) {
2048
dev_err(chan2dev(chan),
2049
"can't allocate channel resources (channel enabled)\n");
2050
return -EIO;
2051
}
2052
2053
if (!list_empty(&atchan->free_descs_list)) {
2054
dev_err(chan2dev(chan),
2055
"can't allocate channel resources (channel not free from a previous use)\n");
2056
return -EIO;
2057
}
2058
2059
for (i = 0; i < init_nr_desc_per_channel; i++) {
2060
desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
2061
if (!desc) {
2062
if (i == 0) {
2063
dev_warn(chan2dev(chan),
2064
"can't allocate any descriptors\n");
2065
return -EIO;
2066
}
2067
dev_warn(chan2dev(chan),
2068
"only %d descriptors have been allocated\n", i);
2069
break;
2070
}
2071
list_add_tail(&desc->desc_node, &atchan->free_descs_list);
2072
}
2073
2074
dma_cookie_init(chan);
2075
2076
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
2077
2078
return i;
2079
}
2080
2081
static void at_xdmac_free_chan_resources(struct dma_chan *chan)
2082
{
2083
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2084
struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
2085
struct at_xdmac_desc *desc, *_desc;
2086
2087
list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
2088
dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
2089
list_del(&desc->desc_node);
2090
dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
2091
}
2092
2093
return;
2094
}
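/*
 * Illustrative sketch, not part of this driver: requesting and
 * releasing a channel from a client. dma_request_chan() is what ends
 * up calling at_xdmac_alloc_chan_resources() above to fill the
 * descriptor pool, and dropping the last reference with
 * dma_release_channel() leads to at_xdmac_free_chan_resources().
 * "rx" is a hypothetical entry in the client's dma-names property.
 */
#if 0	/* example only, assumes <linux/dmaengine.h> */
static struct dma_chan *my_dev_get_rx_chan(struct device *dev)
{
	/* May return ERR_PTR(-EPROBE_DEFER) if the controller isn't up yet. */
	return dma_request_chan(dev, "rx");
}

static void my_dev_put_rx_chan(struct dma_chan *chan)
{
	dma_release_channel(chan);
}
#endif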
2095
2096
static void at_xdmac_axi_config(struct platform_device *pdev)
2097
{
2098
struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
2099
bool dev_m2m = false;
2100
u32 dma_requests;
2101
2102
if (!atxdmac->layout->axi_config)
2103
return; /* Not supported */
2104
2105
if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
2106
&dma_requests)) {
2107
dev_info(&pdev->dev, "controller in mem2mem mode.\n");
2108
dev_m2m = true;
2109
}
2110
2111
if (dev_m2m) {
2112
at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
2113
at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
2114
} else {
2115
at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
2116
at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
2117
}
2118
}
2119
2120
static int __maybe_unused atmel_xdmac_prepare(struct device *dev)
2121
{
2122
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2123
struct dma_chan *chan, *_chan;
2124
2125
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
2126
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2127
2128
/* Wait for transfer completion, except in cyclic case. */
2129
if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
2130
return -EAGAIN;
2131
}
2132
return 0;
2133
}
2134
2135
static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
2136
{
2137
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2138
struct dma_chan *chan, *_chan;
2139
int ret;
2140
2141
ret = pm_runtime_resume_and_get(atxdmac->dev);
2142
if (ret < 0)
2143
return ret;
2144
2145
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
2146
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2147
2148
atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
2149
if (at_xdmac_chan_is_cyclic(atchan)) {
2150
if (!at_xdmac_chan_is_paused(atchan)) {
2151
dev_warn(chan2dev(chan), "%s: channel %d not paused\n",
2152
__func__, chan->chan_id);
2153
at_xdmac_device_pause_internal(atchan);
2154
at_xdmac_runtime_suspend_descriptors(atchan);
2155
}
2156
atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
2157
atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
2158
atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
2159
}
2160
}
2161
atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
2162
atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
2163
2164
at_xdmac_off(atxdmac, false);
2165
pm_runtime_mark_last_busy(atxdmac->dev);
2166
pm_runtime_put_noidle(atxdmac->dev);
2167
clk_disable_unprepare(atxdmac->clk);
2168
2169
return 0;
2170
}
2171
2172
static int __maybe_unused atmel_xdmac_resume(struct device *dev)
2173
{
2174
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2175
struct at_xdmac_chan *atchan;
2176
struct dma_chan *chan, *_chan;
2177
struct platform_device *pdev = container_of(dev, struct platform_device, dev);
2178
int i, ret;
2179
2180
ret = clk_prepare_enable(atxdmac->clk);
2181
if (ret)
2182
return ret;
2183
2184
pm_runtime_get_noresume(atxdmac->dev);
2185
2186
at_xdmac_axi_config(pdev);
2187
2188
/* Clear pending interrupts. */
2189
for (i = 0; i < atxdmac->dma.chancnt; i++) {
2190
atchan = &atxdmac->chan[i];
2191
while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
2192
cpu_relax();
2193
}
2194
2195
at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
2196
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
2197
atchan = to_at_xdmac_chan(chan);
2198
2199
at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
2200
if (at_xdmac_chan_is_cyclic(atchan)) {
2201
/*
2202
* Resume only channels not explicitly paused by
2203
* consumers.
2204
*/
2205
if (at_xdmac_chan_is_paused_internal(atchan)) {
2206
ret = at_xdmac_runtime_resume_descriptors(atchan);
2207
if (ret < 0)
2208
return ret;
2209
at_xdmac_device_resume_internal(atchan);
2210
}
2211
2212
/*
2213
* We may resume from a deep sleep state where power
2214
* to the DMA controller is cut off. Thus, restore the
2215
* suspend state of channels set through the dmaengine API.
2216
*/
2217
else if (at_xdmac_chan_is_paused(atchan))
2218
at_xdmac_device_pause_set(atxdmac, atchan);
2219
2220
at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
2221
at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
2222
at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
2223
wmb();
2224
if (atxdmac->save_gs & atchan->mask)
2225
at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
2226
}
2227
}
2228
2229
pm_runtime_put_autosuspend(atxdmac->dev);
2230
2231
return 0;
2232
}
2233
2234
static int __maybe_unused atmel_xdmac_runtime_suspend(struct device *dev)
2235
{
2236
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2237
2238
clk_disable(atxdmac->clk);
2239
2240
return 0;
2241
}
2242
2243
static int __maybe_unused atmel_xdmac_runtime_resume(struct device *dev)
2244
{
2245
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2246
2247
return clk_enable(atxdmac->clk);
2248
}
2249
2250
static inline int at_xdmac_get_channel_number(struct platform_device *pdev,
2251
u32 reg, u32 *pchannels)
2252
{
2253
int ret;
2254
2255
if (reg) {
2256
*pchannels = AT_XDMAC_NB_CH(reg);
2257
return 0;
2258
}
2259
2260
ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", pchannels);
2261
if (ret)
2262
dev_err(&pdev->dev, "can't get number of channels\n");
2263
2264
return ret;
2265
}
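/*
 * Illustrative sketch, not part of this driver: decoding a raw GTYPE
 * value with the helpers used above, e.g. from a debug dump. The
 * function name is hypothetical; the macros are the ones defined in
 * this file.
 */
#if 0	/* example only */
static void my_decode_gtype(u32 reg)
{
	pr_info("channels=%u, fifo=%u bytes, requests=%u\n",
		AT_XDMAC_NB_CH(reg), AT_XDMAC_FIFO_SZ(reg),
		AT_XDMAC_NB_REQ(reg));
}
#endif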
2266
2267
static int at_xdmac_probe(struct platform_device *pdev)
2268
{
2269
struct at_xdmac *atxdmac;
2270
int irq, ret;
2271
void __iomem *base;
2272
u32 nr_channels, i, reg;
2273
2274
irq = platform_get_irq(pdev, 0);
2275
if (irq < 0)
2276
return irq;
2277
2278
base = devm_platform_ioremap_resource(pdev, 0);
2279
if (IS_ERR(base))
2280
return PTR_ERR(base);
2281
2282
/*
2283
* Read the number of XDMAC channels. The read helper can't be used
2285
* here since atxdmac is not yet allocated and we need to know the
2286
* number of channels to size that allocation.
2286
*/
2287
reg = readl_relaxed(base + AT_XDMAC_GTYPE);
2288
ret = at_xdmac_get_channel_number(pdev, reg, &nr_channels);
2289
if (ret)
2290
return ret;
2291
2292
if (nr_channels > AT_XDMAC_MAX_CHAN) {
2293
dev_err(&pdev->dev, "invalid number of channels (%u)\n",
2294
nr_channels);
2295
return -EINVAL;
2296
}
2297
2298
atxdmac = devm_kzalloc(&pdev->dev,
2299
struct_size(atxdmac, chan, nr_channels),
2300
GFP_KERNEL);
2301
if (!atxdmac) {
2302
dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
2303
return -ENOMEM;
2304
}
2305
2306
atxdmac->regs = base;
2307
atxdmac->irq = irq;
2308
atxdmac->dev = &pdev->dev;
2309
2310
atxdmac->layout = of_device_get_match_data(&pdev->dev);
2311
if (!atxdmac->layout)
2312
return -ENODEV;
2313
2314
atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
2315
if (IS_ERR(atxdmac->clk)) {
2316
dev_err(&pdev->dev, "can't get dma_clk\n");
2317
return PTR_ERR(atxdmac->clk);
2318
}
2319
2320
/* Do not use dev res to prevent races with tasklet */
2321
ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
2322
if (ret) {
2323
dev_err(&pdev->dev, "can't request irq\n");
2324
return ret;
2325
}
2326
2327
ret = clk_prepare_enable(atxdmac->clk);
2328
if (ret) {
2329
dev_err(&pdev->dev, "can't prepare or enable clock\n");
2330
goto err_free_irq;
2331
}
2332
2333
atxdmac->at_xdmac_desc_pool =
2334
dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
2335
sizeof(struct at_xdmac_desc), 4, 0);
2336
if (!atxdmac->at_xdmac_desc_pool) {
2337
dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
2338
ret = -ENOMEM;
2339
goto err_clk_disable;
2340
}
2341
2342
dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
2343
dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
2344
dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
2345
dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
2346
dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
2347
dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
2348
/*
2349
* Without DMA_PRIVATE the driver is not able to allocate more than
2350
* one channel: the second allocation fails in private_candidate().
2351
*/
2352
dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
2353
atxdmac->dma.dev = &pdev->dev;
2354
atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
2355
atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
2356
atxdmac->dma.device_tx_status = at_xdmac_tx_status;
2357
atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
2358
atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
2359
atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
2360
atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
2361
atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
2362
atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
2363
atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
2364
atxdmac->dma.device_config = at_xdmac_device_config;
2365
atxdmac->dma.device_pause = at_xdmac_device_pause;
2366
atxdmac->dma.device_resume = at_xdmac_device_resume;
2367
atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
2368
atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2369
atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2370
atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2371
atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2372
2373
platform_set_drvdata(pdev, atxdmac);
2374
2375
pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
2376
pm_runtime_use_autosuspend(&pdev->dev);
2377
pm_runtime_set_active(&pdev->dev);
2378
pm_runtime_enable(&pdev->dev);
2379
pm_runtime_get_noresume(&pdev->dev);
2380
2381
/* Init channels. */
2382
INIT_LIST_HEAD(&atxdmac->dma.channels);
2383
2384
/* Disable all chans and interrupts. */
2385
at_xdmac_off(atxdmac, true);
2386
2387
for (i = 0; i < nr_channels; i++) {
2388
struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2389
2390
atchan->chan.device = &atxdmac->dma;
2391
list_add_tail(&atchan->chan.device_node,
2392
&atxdmac->dma.channels);
2393
2394
atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
2395
atchan->mask = 1 << i;
2396
2397
spin_lock_init(&atchan->lock);
2398
INIT_LIST_HEAD(&atchan->xfers_list);
2399
INIT_LIST_HEAD(&atchan->free_descs_list);
2400
tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);
2401
2402
/* Clear pending interrupts. */
2403
while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
2404
cpu_relax();
2405
}
2406
2407
ret = dma_async_device_register(&atxdmac->dma);
2408
if (ret) {
2409
dev_err(&pdev->dev, "failed to register DMA engine device\n");
2410
goto err_pm_disable;
2411
}
2412
2413
ret = of_dma_controller_register(pdev->dev.of_node,
2414
at_xdmac_xlate, atxdmac);
2415
if (ret) {
2416
dev_err(&pdev->dev, "could not register OF DMA controller\n");
2417
goto err_dma_unregister;
2418
}
2419
2420
dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
2421
nr_channels, atxdmac->regs);
2422
2423
at_xdmac_axi_config(pdev);
2424
2425
pm_runtime_put_autosuspend(&pdev->dev);
2426
2427
return 0;
2428
2429
err_dma_unregister:
2430
dma_async_device_unregister(&atxdmac->dma);
2431
err_pm_disable:
2432
pm_runtime_put_noidle(&pdev->dev);
2433
pm_runtime_disable(&pdev->dev);
2434
pm_runtime_set_suspended(&pdev->dev);
2435
pm_runtime_dont_use_autosuspend(&pdev->dev);
2436
err_clk_disable:
2437
clk_disable_unprepare(atxdmac->clk);
2438
err_free_irq:
2439
free_irq(atxdmac->irq, atxdmac);
2440
return ret;
2441
}
2442
2443
static void at_xdmac_remove(struct platform_device *pdev)
2444
{
2445
struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
2446
int i;
2447
2448
at_xdmac_off(atxdmac, true);
2449
of_dma_controller_free(pdev->dev.of_node);
2450
dma_async_device_unregister(&atxdmac->dma);
2451
pm_runtime_disable(atxdmac->dev);
2452
pm_runtime_set_suspended(&pdev->dev);
2453
pm_runtime_dont_use_autosuspend(&pdev->dev);
2454
clk_disable_unprepare(atxdmac->clk);
2455
2456
free_irq(atxdmac->irq, atxdmac);
2457
2458
for (i = 0; i < atxdmac->dma.chancnt; i++) {
2459
struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2460
2461
tasklet_kill(&atchan->tasklet);
2462
at_xdmac_free_chan_resources(&atchan->chan);
2463
}
2464
}
2465
2466
static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
2467
.prepare = atmel_xdmac_prepare,
2468
SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
2469
SET_RUNTIME_PM_OPS(atmel_xdmac_runtime_suspend,
2470
atmel_xdmac_runtime_resume, NULL)
2471
};
2472
2473
static const struct of_device_id atmel_xdmac_dt_ids[] = {
2474
{
2475
.compatible = "atmel,sama5d4-dma",
2476
.data = &at_xdmac_sama5d4_layout,
2477
}, {
2478
.compatible = "microchip,sama7g5-dma",
2479
.data = &at_xdmac_sama7g5_layout,
2480
}, {
2481
/* sentinel */
2482
}
2483
};
2484
MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
2485
2486
static struct platform_driver at_xdmac_driver = {
2487
.probe = at_xdmac_probe,
2488
.remove = at_xdmac_remove,
2489
.driver = {
2490
.name = "at_xdmac",
2491
.of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
2492
.pm = pm_ptr(&atmel_xdmac_dev_pm_ops),
2493
}
2494
};
2495
2496
static int __init at_xdmac_init(void)
2497
{
2498
return platform_driver_register(&at_xdmac_driver);
2499
}
2500
subsys_initcall(at_xdmac_init);
2501
2502
static void __exit at_xdmac_exit(void)
2503
{
2504
platform_driver_unregister(&at_xdmac_driver);
2505
}
2506
module_exit(at_xdmac_exit);
2507
2508
MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
2509
MODULE_AUTHOR("Ludovic Desroches <[email protected]>");
2510
MODULE_LICENSE("GPL");
2511
2512