/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
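
/*
 * Illustrative sketch, not part of the API proper: the attributes above are
 * combined into a bitmask and passed via the 'attrs' argument of the *_attrs
 * helpers defined later in this header. The names "dev", "buf" and "len"
 * below are assumptions for illustration. A driver that has already handed a
 * buffer over to the device could skip the CPU cache maintenance like this:
 *
 *	dma_addr_t dma = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
 *					      DMA_ATTR_SKIP_CPU_SYNC);
 */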

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
};

extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_noncoherent_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These functions are only for use by the DMA allocator core.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
		dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
		const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the DMA API to allow compilation of DMA-dependent code.
 * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA'
 * in its Kconfig, unless it already depends on <something> || COMPILE_TEST,
 * where <something> guarantees the availability of the dma-mapping API.
 */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
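
/*
 * Illustrative sketch, assuming a driver-provided "dev", a kmalloc'ed "buf"
 * and a length "len" (all hypothetical names): a typical streaming mapping
 * for a device-to-memory transfer maps the buffer, checks for mapping
 * errors, and unmaps once the transfer has completed:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... start the transfer and wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */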

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
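
/*
 * Illustrative sketch ("dev", "sgt", "i" and foo_setup_desc() are assumed
 * names, not part of this API): the returned count may be smaller than the
 * number of entries passed in, so drivers must iterate over the mapped
 * entries only, and must pass the original nents back when unmapping:
 *
 *	struct scatterlist *sg;
 *	int mapped, i;
 *
 *	mapped = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sgt->sgl, sg, mapped, i)
 *		foo_setup_desc(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */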

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
		dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
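
/*
 * Illustrative sketch: dma_map_resource() is meant for MMIO ranges, not RAM,
 * e.g. handing a peripheral's FIFO register window to a DMA engine. The
 * names "dev", "res" and "len" are assumptions for illustration:
 *
 *	dma_addr_t dma = dma_map_resource(dev, res->start, len,
 *					  DMA_BIDIRECTIONAL, 0);
 *	...
 *	dma_unmap_resource(dev, dma, len, DMA_BIDIRECTIONAL, 0);
 */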

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
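
/*
 * Illustrative sketch: when the CPU only touches part of a long-lived
 * streaming mapping, the range variants avoid maintaining the whole buffer.
 * "dev", "dma" and "hdr_len" are assumed names:
 *
 *	dma_sync_single_range_for_cpu(dev, dma, 0, hdr_len, DMA_FROM_DEVICE);
 *	... inspect the header bytes on the CPU ...
 *	dma_sync_single_range_for_device(dev, dma, 0, hdr_len,
 *					 DMA_FROM_DEVICE);
 */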

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
		unsigned long vm_flags, pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
		unsigned long vm_flags, pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
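
/*
 * Illustrative sketch, assuming a hypothetical driver whose private data
 * ("foo_priv", "foo_mmap" and its fields are made-up names) holds the
 * cpu_addr, dma_handle and size from an earlier dma_alloc_coherent() call;
 * its mmap file operation can hand the buffer to user space with:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_handle, priv->size);
 *	}
 */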

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);
	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (!arch_dma_alloc_attrs(&dev))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
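
/*
 * Illustrative sketch ("dev", "ring" and "ring_dma" are assumed names): a
 * coherent buffer is typically allocated once at probe time and kept mapped
 * for the lifetime of the device:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, touch "ring" from the CPU ...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */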

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
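
/*
 * Illustrative sketch ("pdev" is a hypothetical struct pci_dev *): drivers
 * usually try the widest mask their hardware supports during probe and fall
 * back to 32 bits, before any mapping or allocation is made:
 *
 *	int err;
 *
 *	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (err)
 *		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (err)
 *		return err;
 */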

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
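
/*
 * Illustrative sketch ("dev" is an assumed name): a bus or driver whose DMA
 * engine is limited to 4 KiB segments and cannot cross a 64 KiB boundary
 * could advertise that before building scatterlists. Both setters require
 * dev->dma_parms to have been provided by the bus code:
 *
 *	dma_set_max_seg_size(dev, SZ_4K);
 *	dma_set_seg_boundary(dev, SZ_64K - 1);
 */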

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
		dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
		dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
	return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif

/*
 * Managed DMA API
 */
#ifdef CONFIG_HAS_DMA
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
#else /* !CONFIG_HAS_DMA */
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{ return NULL; }
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle) { }
#endif /* !CONFIG_HAS_DMA */

extern void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size,
		int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr,
		size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
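
/*
 * Illustrative sketch ("dev", "fb", "fb_dma" and "fb_size" are assumed
 * names): the *_wc helpers wrap the generic calls with
 * DMA_ATTR_WRITE_COMBINE, which suits frame buffers and similar streams of
 * CPU writes that the device only reads:
 *
 *	dma_addr_t fb_dma;
 *	void *fb;
 *
 *	fb = dma_alloc_wc(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	dma_free_wc(dev, fb_size, fb, fb_dma);
 */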

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
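
/*
 * Illustrative sketch ("foo_buffer", "buf", "dev", "dma" and "size" are
 * hypothetical driver names): these macros let a driver record unmap
 * information only on configurations that actually need it, at zero cost
 * elsewhere:
 *
 *	struct foo_buffer {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, addr, dma);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, addr),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */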

#endif