GitHub Repository: torvalds/linux
Path: blob/master/kernel/dma/direct.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#ifndef _KERNEL_DMA_DIRECT_H
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>
#include <linux/memremap.h>

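/*
 * Editor's note (not part of the original header): these out-of-line helpers
 * are implemented in kernel/dma/direct.c and are called from the generic DMA
 * mapping layer when a device uses the direct-mapping path.
 */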
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs);
bool dma_direct_all_ram_mapped(struct device *dev);
size_t dma_direct_max_mapping_size(struct device *dev);

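/*
 * Syncing a scatterlist toward the device only does real work when the
 * architecture performs cache maintenance for DMA or swiotlb bouncing may
 * be in play; otherwise the helper compiles away to an empty stub.
 */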
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

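/*
 * Likewise, scatterlist unmap and CPU-direction sync are only non-trivial
 * when CPU-side cache maintenance or swiotlb support is configured.
 */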
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

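/*
 * Transfer ownership of a single streaming mapping back to the device:
 * update any swiotlb bounce buffer, then do the architecture cache
 * maintenance for non-coherent devices.
 */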
static inline void dma_direct_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = dma_to_phys(dev, addr);

        swiotlb_sync_single_for_device(dev, paddr, size, dir);

        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_device(paddr, size, dir);
}

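/*
 * Transfer ownership of a single streaming mapping back to the CPU:
 * architecture cache maintenance for non-coherent devices first, then copy
 * back from any swiotlb bounce buffer.
 */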
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = dma_to_phys(dev, addr);

        if (!dev_is_dma_coherent(dev)) {
                arch_sync_dma_for_cpu(paddr, size, dir);
                arch_sync_dma_for_cpu_all();
        }

        swiotlb_sync_single_for_cpu(dev, paddr, size, dir);

        if (dir == DMA_FROM_DEVICE)
                arch_dma_mark_clean(paddr, size);
}

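/*
 * Map one page for streaming DMA.  The mapping is bounced through swiotlb
 * when bouncing is forced, when the device cannot address the buffer, or
 * when a small kmalloc() buffer needs bounce alignment; PCI P2PDMA pages
 * are never bounced and the mapping fails instead.
 */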
static inline dma_addr_t dma_direct_map_page(struct device *dev,
                struct page *page, unsigned long offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        dma_addr_t dma_addr = phys_to_dma(dev, phys);

        if (is_swiotlb_force_bounce(dev)) {
                if (is_pci_p2pdma_page(page))
                        return DMA_MAPPING_ERROR;
                return swiotlb_map(dev, phys, size, dir, attrs);
        }

        if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
            dma_kmalloc_needs_bounce(dev, size, dir)) {
                if (is_pci_p2pdma_page(page))
                        return DMA_MAPPING_ERROR;
                if (is_swiotlb_active(dev))
                        return swiotlb_map(dev, phys, size, dir, attrs);

                dev_WARN_ONCE(dev, 1,
                             "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
                             &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
                return DMA_MAPPING_ERROR;
        }

        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                arch_sync_dma_for_device(phys, size, dir);
        return dma_addr;
}

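/*
 * Tear down a single streaming mapping: sync the buffer back to the CPU
 * (unless the caller asked to skip that) and release any swiotlb bounce
 * buffer backing it.
 */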
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t phys = dma_to_phys(dev, addr);

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);

        swiotlb_tbl_unmap_single(dev, phys, size, dir,
                        attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
#endif /* _KERNEL_DMA_DIRECT_H */