Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * arch-independent dma-mapping routines
4 : *
5 : * Copyright (c) 2006 SUSE Linux Products GmbH
6 : * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
7 : */
8 : #include <linux/memblock.h> /* for max_pfn */
9 : #include <linux/acpi.h>
10 : #include <linux/dma-map-ops.h>
11 : #include <linux/export.h>
12 : #include <linux/gfp.h>
13 : #include <linux/of_device.h>
14 : #include <linux/slab.h>
15 : #include <linux/vmalloc.h>
16 : #include "debug.h"
17 : #include "direct.h"
18 :
19 : bool dma_default_coherent;
20 :
21 : /*
22 : * Managed DMA API
23 : */
24 : struct dma_devres {
25 : size_t size;
26 : void *vaddr;
27 : dma_addr_t dma_handle;
28 : unsigned long attrs;
29 : };
30 :
31 0 : static void dmam_release(struct device *dev, void *res)
32 : {
33 0 : struct dma_devres *this = res;
34 :
35 0 : dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
36 : this->attrs);
37 0 : }
38 :
39 0 : static int dmam_match(struct device *dev, void *res, void *match_data)
40 : {
41 0 : struct dma_devres *this = res, *match = match_data;
42 :
43 0 : if (this->vaddr == match->vaddr) {
44 0 : WARN_ON(this->size != match->size ||
45 : this->dma_handle != match->dma_handle);
46 : return 1;
47 : }
48 : return 0;
49 : }
50 :
51 : /**
52 : * dmam_free_coherent - Managed dma_free_coherent()
53 : * @dev: Device to free coherent memory for
54 : * @size: Size of allocation
55 : * @vaddr: Virtual address of the memory to free
56 : * @dma_handle: DMA handle of the memory to free
57 : *
58 : * Managed dma_free_coherent().
59 : */
60 0 : void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
61 : dma_addr_t dma_handle)
62 : {
63 0 : struct dma_devres match_data = { size, vaddr, dma_handle };
64 :
65 0 : dma_free_coherent(dev, size, vaddr, dma_handle);
66 0 : WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
67 0 : }
68 : EXPORT_SYMBOL(dmam_free_coherent);
69 :
70 : /**
71 : * dmam_alloc_attrs - Managed dma_alloc_attrs()
72 : * @dev: Device to allocate memory for
73 : * @size: Size of allocation
74 : * @dma_handle: Out argument for allocated DMA handle
75 : * @gfp: Allocation flags
76 : * @attrs: Flags in the DMA_ATTR_* namespace.
77 : *
78 : * Managed dma_alloc_attrs(). Memory allocated using this function will be
79 : * automatically released on driver detach.
80 : *
81 : * RETURNS:
82 : * Pointer to allocated memory on success, NULL on failure.
83 : */
84 0 : void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
85 : gfp_t gfp, unsigned long attrs)
86 : {
87 : struct dma_devres *dr;
88 : void *vaddr;
89 :
90 0 : dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
91 0 : if (!dr)
92 : return NULL;
93 :
94 0 : vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
95 0 : if (!vaddr) {
96 0 : devres_free(dr);
97 0 : return NULL;
98 : }
99 :
100 0 : dr->vaddr = vaddr;
101 0 : dr->dma_handle = *dma_handle;
102 0 : dr->size = size;
103 0 : dr->attrs = attrs;
104 :
105 0 : devres_add(dev, dr);
106 :
107 0 : return vaddr;
108 : }
109 : EXPORT_SYMBOL(dmam_alloc_attrs);
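
/*
 * Illustrative sketch of how a driver might use the managed API above from
 * its probe path.  The foo_probe_example() name and the buffer size are
 * assumptions made for the example only, not part of this file's API.
 */
static inline int foo_probe_example(struct device *dev)
{
	dma_addr_t dma;
	void *buf;

	/* The allocation is tied to @dev and released on driver detach. */
	buf = dmam_alloc_attrs(dev, PAGE_SIZE, &dma, GFP_KERNEL, 0);
	if (!buf)
		return -ENOMEM;

	/* No explicit dmam_free_coherent() is needed on the exit paths. */
	return 0;
}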
110 :
111 : static bool dma_go_direct(struct device *dev, dma_addr_t mask,
112 : const struct dma_map_ops *ops)
113 : {
114 : if (likely(!ops))
115 : return true;
116 : #ifdef CONFIG_DMA_OPS_BYPASS
117 : if (dev->dma_ops_bypass)
118 : return min_not_zero(mask, dev->bus_dma_limit) >=
119 : dma_direct_get_required_mask(dev);
120 : #endif
121 : return false;
122 : }
123 :
124 :
125 : /*
126 : * Check if the device uses a direct mapping for streaming DMA operations.
127 : * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
128 : * enough.
129 : */
130 : static inline bool dma_alloc_direct(struct device *dev,
131 : const struct dma_map_ops *ops)
132 : {
133 0 : return dma_go_direct(dev, dev->coherent_dma_mask, ops);
134 : }
135 :
136 : static inline bool dma_map_direct(struct device *dev,
137 : const struct dma_map_ops *ops)
138 : {
139 0 : return dma_go_direct(dev, *dev->dma_mask, ops);
140 : }
141 :
142 0 : dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
143 : size_t offset, size_t size, enum dma_data_direction dir,
144 : unsigned long attrs)
145 : {
146 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
147 : dma_addr_t addr;
148 :
149 0 : BUG_ON(!valid_dma_direction(dir));
150 :
151 0 : if (WARN_ON_ONCE(!dev->dma_mask))
152 : return DMA_MAPPING_ERROR;
153 :
154 0 : if (dma_map_direct(dev, ops) ||
155 : arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
156 0 : addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
157 : else
158 : addr = ops->map_page(dev, page, offset, size, dir, attrs);
159 0 : debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
160 :
161 0 : return addr;
162 : }
163 : EXPORT_SYMBOL(dma_map_page_attrs);
164 :
165 0 : void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
166 : enum dma_data_direction dir, unsigned long attrs)
167 : {
168 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
169 :
170 0 : BUG_ON(!valid_dma_direction(dir));
171 0 : if (dma_map_direct(dev, ops) ||
172 : arch_dma_unmap_page_direct(dev, addr + size))
173 0 : dma_direct_unmap_page(dev, addr, size, dir, attrs);
174 : else if (ops->unmap_page)
175 : ops->unmap_page(dev, addr, size, dir, attrs);
176 0 : debug_dma_unmap_page(dev, addr, size, dir);
177 0 : }
178 : EXPORT_SYMBOL(dma_unmap_page_attrs);
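
/*
 * Illustrative sketch of a streaming mapping built on the two helpers above:
 * map one page for device reads, check for failure, then unmap.  The page
 * argument and the DMA_TO_DEVICE direction are assumptions for the example.
 */
static inline int foo_map_one_page_example(struct device *dev,
		struct page *page)
{
	dma_addr_t addr;

	addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... hand "addr" to the device and wait for the transfer ... */

	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_TO_DEVICE, 0);
	return 0;
}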
179 :
180 0 : static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
181 : int nents, enum dma_data_direction dir, unsigned long attrs)
182 : {
183 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
184 : int ents;
185 :
186 0 : BUG_ON(!valid_dma_direction(dir));
187 :
188 0 : if (WARN_ON_ONCE(!dev->dma_mask))
189 : return 0;
190 :
191 0 : if (dma_map_direct(dev, ops) ||
192 : arch_dma_map_sg_direct(dev, sg, nents))
193 0 : ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
194 : else
195 : ents = ops->map_sg(dev, sg, nents, dir, attrs);
196 :
197 0 : if (ents > 0)
198 : debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
199 0 : else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
200 : ents != -EIO))
201 : return -EIO;
202 :
203 : return ents;
204 : }
205 :
206 : /**
207 : * dma_map_sg_attrs - Map the given buffer for DMA
208 : * @dev: The device for which to perform the DMA operation
209 : * @sg: The scatterlist describing the buffer
210 : * @nents: Number of entries to map
211 : * @dir: DMA direction
212 : * @attrs: Optional DMA attributes for the map operation
213 : *
214 : * Maps a buffer described by a scatterlist passed in the sg argument with
215 : * nents segments for the @dir DMA operation by the @dev device.
216 : *
217 : * Returns the number of mapped entries (which can be less than nents)
218 : * on success. Zero is returned for any error.
219 : *
220 : * dma_unmap_sg_attrs() should be used to unmap the buffer with the
221 : * original sg and original nents (not the value returned by this function).
222 : */
223 0 : unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
224 : int nents, enum dma_data_direction dir, unsigned long attrs)
225 : {
226 : int ret;
227 :
228 0 : ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
229 0 : if (ret < 0)
230 : return 0;
231 0 : return ret;
232 : }
233 : EXPORT_SYMBOL(dma_map_sg_attrs);
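
/*
 * Illustrative sketch of the rule in the kernel-doc above: the return value
 * of dma_map_sg_attrs() is only used to walk the mapped segments, while the
 * unmap call is made with the original nents.  Names are hypothetical.
 */
static inline void foo_map_sg_example(struct device *dev,
		struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	unsigned int mapped;
	unsigned int i;

	mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
	if (!mapped)
		return;		/* error: nothing was mapped */

	for_each_sg(sgl, sg, mapped, i)
		dev_dbg(dev, "segment %u: %pad len %u\n", i,
			&sg_dma_address(sg), sg_dma_len(sg));

	dma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
}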
234 :
235 : /**
236 : * dma_map_sgtable - Map the given buffer for DMA
237 : * @dev: The device for which to perform the DMA operation
238 : * @sgt: The sg_table object describing the buffer
239 : * @dir: DMA direction
240 : * @attrs: Optional DMA attributes for the map operation
241 : *
242 : * Maps a buffer described by a scatterlist stored in the given sg_table
243 : * object for the @dir DMA operation by the @dev device. After success, the
244 : * ownership for the buffer is transferred to the DMA domain. One has to
245 : * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
246 : * ownership of the buffer back to the CPU domain before touching the
247 : * buffer by the CPU.
248 : *
249 : * Returns 0 on success or a negative error code on error. The following
250 : * error codes are supported with the given meaning:
251 : *
252 : * -EINVAL An invalid argument, unaligned access or other error
253 : * in usage. Will not succeed if retried.
254 : * -ENOMEM Insufficient resources (like memory or IOVA space) to
255 : * complete the mapping. Should succeed if retried later.
256 : * -EIO Legacy error code with an unknown meaning, e.g. this is
257 : * returned if a lower level call returned DMA_MAPPING_ERROR.
258 : */
259 0 : int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
260 : enum dma_data_direction dir, unsigned long attrs)
261 : {
262 : int nents;
263 :
264 0 : nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
265 0 : if (nents < 0)
266 : return nents;
267 0 : sgt->nents = nents;
268 0 : return 0;
269 : }
270 : EXPORT_SYMBOL_GPL(dma_map_sgtable);
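
/*
 * Illustrative sketch of dma_map_sgtable() with the error handling the
 * kernel-doc above describes: -ENOMEM may be retried later, everything else
 * is a hard failure.  dma_unmap_sgtable() is the inline helper from
 * <linux/dma-mapping.h>.
 */
static inline int foo_map_sgtable_example(struct device *dev,
		struct sg_table *sgt)
{
	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret == -ENOMEM)
		return ret;	/* caller may retry once resources free up */
	if (ret)
		return ret;	/* -EINVAL/-EIO: do not retry */

	/* ... run the transfer using sgt->sgl and sgt->nents ... */

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	return 0;
}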
271 :
272 0 : void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
273 : int nents, enum dma_data_direction dir,
274 : unsigned long attrs)
275 : {
276 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
277 :
278 0 : BUG_ON(!valid_dma_direction(dir));
279 0 : debug_dma_unmap_sg(dev, sg, nents, dir);
280 0 : if (dma_map_direct(dev, ops) ||
281 : arch_dma_unmap_sg_direct(dev, sg, nents))
282 : dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
283 : else if (ops->unmap_sg)
284 : ops->unmap_sg(dev, sg, nents, dir, attrs);
285 0 : }
286 : EXPORT_SYMBOL(dma_unmap_sg_attrs);
287 :
288 0 : dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
289 : size_t size, enum dma_data_direction dir, unsigned long attrs)
290 : {
291 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
292 0 : dma_addr_t addr = DMA_MAPPING_ERROR;
293 :
294 0 : BUG_ON(!valid_dma_direction(dir));
295 :
296 0 : if (WARN_ON_ONCE(!dev->dma_mask))
297 : return DMA_MAPPING_ERROR;
298 :
299 0 : if (dma_map_direct(dev, ops))
300 0 : addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
301 : else if (ops->map_resource)
302 : addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
303 :
304 0 : debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
305 0 : return addr;
306 : }
307 : EXPORT_SYMBOL(dma_map_resource);
308 :
309 0 : void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
310 : enum dma_data_direction dir, unsigned long attrs)
311 : {
312 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
313 :
314 0 : BUG_ON(!valid_dma_direction(dir));
315 0 : if (!dma_map_direct(dev, ops) && ops->unmap_resource)
316 : ops->unmap_resource(dev, addr, size, dir, attrs);
317 0 : debug_dma_unmap_resource(dev, addr, size, dir);
318 0 : }
319 : EXPORT_SYMBOL(dma_unmap_resource);
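
/*
 * Illustrative sketch of dma_map_resource()/dma_unmap_resource() for a
 * physical MMIO region (for example another device's register window used
 * as a DMA target).  The phys_addr value is an assumption of the example.
 */
static inline int foo_map_resource_example(struct device *dev,
		phys_addr_t phys_addr, size_t size)
{
	dma_addr_t addr;

	addr = dma_map_resource(dev, phys_addr, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... point the DMA engine at "addr" ... */

	dma_unmap_resource(dev, addr, size, DMA_BIDIRECTIONAL, 0);
	return 0;
}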
320 :
321 0 : void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
322 : enum dma_data_direction dir)
323 : {
324 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
325 :
326 0 : BUG_ON(!valid_dma_direction(dir));
327 0 : if (dma_map_direct(dev, ops))
328 0 : dma_direct_sync_single_for_cpu(dev, addr, size, dir);
329 : else if (ops->sync_single_for_cpu)
330 : ops->sync_single_for_cpu(dev, addr, size, dir);
331 0 : debug_dma_sync_single_for_cpu(dev, addr, size, dir);
332 0 : }
333 : EXPORT_SYMBOL(dma_sync_single_for_cpu);
334 :
335 0 : void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
336 : size_t size, enum dma_data_direction dir)
337 : {
338 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
339 :
340 0 : BUG_ON(!valid_dma_direction(dir));
341 0 : if (dma_map_direct(dev, ops))
342 0 : dma_direct_sync_single_for_device(dev, addr, size, dir);
343 : else if (ops->sync_single_for_device)
344 : ops->sync_single_for_device(dev, addr, size, dir);
345 0 : debug_dma_sync_single_for_device(dev, addr, size, dir);
346 0 : }
347 : EXPORT_SYMBOL(dma_sync_single_for_device);
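
/*
 * Illustrative sketch of the ownership hand-off done with the two sync
 * helpers above for a long-lived DMA_FROM_DEVICE mapping: give the buffer
 * to the device before a transfer and take it back before the CPU reads it.
 */
static inline void foo_sync_example(struct device *dev, dma_addr_t addr,
		size_t size)
{
	dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);
	/* ... the device writes into the buffer ... */
	dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
	/* the CPU may now read the data the device wrote */
}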
348 :
349 0 : void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
350 : int nelems, enum dma_data_direction dir)
351 : {
352 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
353 :
354 0 : BUG_ON(!valid_dma_direction(dir));
355 0 : if (dma_map_direct(dev, ops))
356 : dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
357 : else if (ops->sync_sg_for_cpu)
358 : ops->sync_sg_for_cpu(dev, sg, nelems, dir);
359 0 : debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
360 0 : }
361 : EXPORT_SYMBOL(dma_sync_sg_for_cpu);
362 :
363 0 : void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
364 : int nelems, enum dma_data_direction dir)
365 : {
366 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
367 :
368 0 : BUG_ON(!valid_dma_direction(dir));
369 0 : if (dma_map_direct(dev, ops))
370 : dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
371 : else if (ops->sync_sg_for_device)
372 : ops->sync_sg_for_device(dev, sg, nelems, dir);
373 0 : debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
374 0 : }
375 : EXPORT_SYMBOL(dma_sync_sg_for_device);
376 :
377 : /*
378 : * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
379 : * that the intention is to allow exporting memory allocated via the
380 : * coherent DMA APIs through the dma_buf API, which only accepts a
381 : * scatter-gather table. This presents a couple of problems:
382 : * 1. Not all memory allocated via the coherent DMA APIs is backed by
383 : * a struct page
384 : * 2. Passing coherent DMA memory into the streaming APIs is not allowed
385 : * as we will try to flush the memory through a different alias to that
386 : * actually being used (and the flushes are redundant.)
387 : */
388 0 : int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
389 : void *cpu_addr, dma_addr_t dma_addr, size_t size,
390 : unsigned long attrs)
391 : {
392 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
393 :
394 0 : if (dma_alloc_direct(dev, ops))
395 0 : return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
396 : size, attrs);
397 : if (!ops->get_sgtable)
398 : return -ENXIO;
399 : return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
400 : }
401 : EXPORT_SYMBOL(dma_get_sgtable_attrs);
402 :
403 : #ifdef CONFIG_MMU
404 : /*
405 : * Return the page attributes used for mapping dma_alloc_* memory, either in
406 : * kernel space if remapping is needed, or to userspace through dma_mmap_*.
407 : */
408 0 : pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
409 : {
410 0 : if (dev_is_dma_coherent(dev))
411 0 : return prot;
412 : #ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
413 : if (attrs & DMA_ATTR_WRITE_COMBINE)
414 : return pgprot_writecombine(prot);
415 : #endif
416 : return pgprot_dmacoherent(prot);
417 : }
418 : #endif /* CONFIG_MMU */
419 :
420 : /**
421 : * dma_can_mmap - check if a given device supports dma_mmap_*
422 : * @dev: device to check
423 : *
424 : * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
425 : * map DMA allocations to userspace.
426 : */
427 0 : bool dma_can_mmap(struct device *dev)
428 : {
429 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
430 :
431 0 : if (dma_alloc_direct(dev, ops))
432 0 : return dma_direct_can_mmap(dev);
433 : return ops->mmap != NULL;
434 : }
435 : EXPORT_SYMBOL_GPL(dma_can_mmap);
436 :
437 : /**
438 : * dma_mmap_attrs - map a coherent DMA allocation into user space
439 : * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
440 : * @vma: vm_area_struct describing requested user mapping
441 : * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
442 : * @dma_addr: device-view address returned from dma_alloc_attrs
443 : * @size: size of memory originally requested in dma_alloc_attrs
444 : * @attrs: attributes of mapping properties requested in dma_alloc_attrs
445 : *
446 : * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
447 : * space. The coherent DMA buffer must not be freed by the driver until the
448 : * user space mapping has been released.
449 : */
450 0 : int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
451 : void *cpu_addr, dma_addr_t dma_addr, size_t size,
452 : unsigned long attrs)
453 : {
454 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
455 :
456 0 : if (dma_alloc_direct(dev, ops))
457 0 : return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
458 : attrs);
459 : if (!ops->mmap)
460 : return -ENXIO;
461 : return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
462 : }
463 : EXPORT_SYMBOL(dma_mmap_attrs);
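
/*
 * Illustrative sketch of a driver .mmap path built on dma_can_mmap() and
 * dma_mmap_attrs(), exposing a previously allocated coherent buffer to user
 * space.  The cpu_addr/dma_addr/size arguments are assumed to come from an
 * earlier dma_alloc_attrs() call; the function name is hypothetical.
 */
static inline int foo_mmap_example(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size)
{
	if (!dma_can_mmap(dev))
		return -ENXIO;
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, 0);
}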
464 :
465 0 : u64 dma_get_required_mask(struct device *dev)
466 : {
467 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
468 :
469 0 : if (dma_alloc_direct(dev, ops))
470 0 : return dma_direct_get_required_mask(dev);
471 : if (ops->get_required_mask)
472 : return ops->get_required_mask(dev);
473 :
474 : /*
475 : * We require every DMA ops implementation to at least support a 32-bit
476 : * DMA mask (and use bounce buffering if that isn't supported in
477 : * hardware). As the direct mapping code has its own routine to
478 : * actually report an optimal mask we default to 32-bit here as that
479 : * is the right thing for most IOMMUs, and at least not actively
480 : * harmful in general.
481 : */
482 : return DMA_BIT_MASK(32);
483 : }
484 : EXPORT_SYMBOL_GPL(dma_get_required_mask);
485 :
486 0 : void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
487 : gfp_t flag, unsigned long attrs)
488 : {
489 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
490 : void *cpu_addr;
491 :
492 0 : WARN_ON_ONCE(!dev->coherent_dma_mask);
493 :
494 : if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
495 : return cpu_addr;
496 :
497 : /* let the implementation decide on the zone to allocate from: */
498 0 : flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
499 :
500 0 : if (dma_alloc_direct(dev, ops))
501 0 : cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
502 : else if (ops->alloc)
503 : cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
504 : else
505 : return NULL;
506 :
507 0 : debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
508 : return cpu_addr;
509 : }
510 : EXPORT_SYMBOL(dma_alloc_attrs);
511 :
512 0 : void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
513 : dma_addr_t dma_handle, unsigned long attrs)
514 : {
515 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
516 :
517 : if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
518 : return;
519 : /*
520 : * On non-coherent platforms which implement DMA-coherent buffers via
521 : * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
522 : * this far in IRQ context is a) at risk of a BUG_ON() or trying to
523 : * sleep on some machines, and b) an indication that the driver is
524 : * probably misusing the coherent API anyway.
525 : */
526 0 : WARN_ON(irqs_disabled());
527 :
528 0 : if (!cpu_addr)
529 : return;
530 :
531 0 : debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
532 0 : if (dma_alloc_direct(dev, ops))
533 0 : dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
534 : else if (ops->free)
535 : ops->free(dev, size, cpu_addr, dma_handle, attrs);
536 : }
537 : EXPORT_SYMBOL(dma_free_attrs);
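
/*
 * Illustrative sketch pairing the two calls above for an unmanaged coherent
 * allocation; unlike the dmam_*() variants earlier in this file, the driver
 * must free the buffer itself.  Size and names are example assumptions.
 */
static inline void foo_coherent_example(struct device *dev)
{
	dma_addr_t dma;
	void *buf;

	buf = dma_alloc_attrs(dev, PAGE_SIZE, &dma, GFP_KERNEL, 0);
	if (!buf)
		return;

	/* ... use buf (CPU view) and dma (device view) ... */

	dma_free_attrs(dev, PAGE_SIZE, buf, dma, 0);
}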
538 :
539 0 : static struct page *__dma_alloc_pages(struct device *dev, size_t size,
540 : dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
541 : {
542 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
543 :
544 0 : if (WARN_ON_ONCE(!dev->coherent_dma_mask))
545 : return NULL;
546 0 : if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
547 : return NULL;
548 :
549 0 : size = PAGE_ALIGN(size);
550 0 : if (dma_alloc_direct(dev, ops))
551 0 : return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
552 : if (!ops->alloc_pages)
553 : return NULL;
554 : return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
555 : }
556 :
557 0 : struct page *dma_alloc_pages(struct device *dev, size_t size,
558 : dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
559 : {
560 0 : struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
561 :
562 : if (page)
563 : debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
564 0 : return page;
565 : }
566 : EXPORT_SYMBOL_GPL(dma_alloc_pages);
567 :
568 : static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
569 : dma_addr_t dma_handle, enum dma_data_direction dir)
570 : {
571 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
572 :
573 0 : size = PAGE_ALIGN(size);
574 0 : if (dma_alloc_direct(dev, ops))
575 0 : dma_direct_free_pages(dev, size, page, dma_handle, dir);
576 : else if (ops->free_pages)
577 : ops->free_pages(dev, size, page, dma_handle, dir);
578 : }
579 :
580 0 : void dma_free_pages(struct device *dev, size_t size, struct page *page,
581 : dma_addr_t dma_handle, enum dma_data_direction dir)
582 : {
583 0 : debug_dma_unmap_page(dev, dma_handle, size, dir);
584 0 : __dma_free_pages(dev, size, page, dma_handle, dir);
585 0 : }
586 : EXPORT_SYMBOL_GPL(dma_free_pages);
587 :
588 0 : int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
589 : size_t size, struct page *page)
590 : {
591 0 : unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
592 :
593 0 : if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
594 : return -ENXIO;
595 0 : return remap_pfn_range(vma, vma->vm_start,
596 0 : page_to_pfn(page) + vma->vm_pgoff,
597 0 : vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
598 : }
599 : EXPORT_SYMBOL_GPL(dma_mmap_pages);
600 :
601 0 : static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
602 : enum dma_data_direction dir, gfp_t gfp)
603 : {
604 : struct sg_table *sgt;
605 : struct page *page;
606 :
607 0 : sgt = kmalloc(sizeof(*sgt), gfp);
608 0 : if (!sgt)
609 : return NULL;
610 0 : if (sg_alloc_table(sgt, 1, gfp))
611 : goto out_free_sgt;
612 0 : page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
613 0 : if (!page)
614 : goto out_free_table;
615 0 : sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
616 : sg_dma_len(sgt->sgl) = sgt->sgl->length;
617 0 : return sgt;
618 : out_free_table:
619 0 : sg_free_table(sgt);
620 : out_free_sgt:
621 0 : kfree(sgt);
622 0 : return NULL;
623 : }
624 :
625 0 : struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
626 : enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
627 : {
628 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
629 : struct sg_table *sgt;
630 :
631 0 : if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
632 : return NULL;
633 :
634 : if (ops && ops->alloc_noncontiguous)
635 : sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
636 : else
637 0 : sgt = alloc_single_sgt(dev, size, dir, gfp);
638 :
639 0 : if (sgt) {
640 0 : sgt->nents = 1;
641 0 : debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
642 : }
643 : return sgt;
644 : }
645 : EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);
646 :
647 0 : static void free_single_sgt(struct device *dev, size_t size,
648 : struct sg_table *sgt, enum dma_data_direction dir)
649 : {
650 0 : __dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
651 : dir);
652 0 : sg_free_table(sgt);
653 0 : kfree(sgt);
654 0 : }
655 :
656 0 : void dma_free_noncontiguous(struct device *dev, size_t size,
657 : struct sg_table *sgt, enum dma_data_direction dir)
658 : {
659 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
660 :
661 0 : debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
662 : if (ops && ops->free_noncontiguous)
663 : ops->free_noncontiguous(dev, size, sgt, dir);
664 : else
665 0 : free_single_sgt(dev, size, sgt, dir);
666 0 : }
667 : EXPORT_SYMBOL_GPL(dma_free_noncontiguous);
668 :
669 0 : void *dma_vmap_noncontiguous(struct device *dev, size_t size,
670 : struct sg_table *sgt)
671 : {
672 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
673 0 : unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
674 :
675 : if (ops && ops->alloc_noncontiguous)
676 : return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
677 0 : return page_address(sg_page(sgt->sgl));
678 : }
679 : EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);
680 :
681 0 : void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
682 : {
683 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
684 :
685 : if (ops && ops->alloc_noncontiguous)
686 : vunmap(vaddr);
687 0 : }
688 : EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);
689 :
690 0 : int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
691 : size_t size, struct sg_table *sgt)
692 : {
693 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
694 :
695 : if (ops && ops->alloc_noncontiguous) {
696 : unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
697 :
698 : if (vma->vm_pgoff >= count ||
699 : vma_pages(vma) > count - vma->vm_pgoff)
700 : return -ENXIO;
701 : return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
702 : }
703 0 : return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
704 : }
705 : EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
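
/*
 * Illustrative sketch of the non-contiguous API above: allocate, obtain a
 * contiguous kernel mapping, then tear both down again.  Error handling is
 * deliberately minimal and the function name is hypothetical.
 */
static inline void foo_noncontiguous_example(struct device *dev, size_t size)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return;

	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (vaddr) {
		/* ... CPU access through vaddr ... */
		dma_vunmap_noncontiguous(dev, vaddr);
	}

	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
}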
706 :
707 0 : int dma_supported(struct device *dev, u64 mask)
708 : {
709 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
710 :
711 : /*
712 : * ->dma_supported sets the bypass flag, so we must always call
713 : * into the method here unless the device is truly direct mapped.
714 : */
715 : if (!ops)
716 0 : return dma_direct_supported(dev, mask);
717 : if (!ops->dma_supported)
718 : return 1;
719 : return ops->dma_supported(dev, mask);
720 : }
721 : EXPORT_SYMBOL(dma_supported);
722 :
723 : #ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
724 : void arch_dma_set_mask(struct device *dev, u64 mask);
725 : #else
726 : #define arch_dma_set_mask(dev, mask) do { } while (0)
727 : #endif
728 :
729 0 : int dma_set_mask(struct device *dev, u64 mask)
730 : {
731 : /*
732 : * Truncate the mask to the actually supported dma_addr_t width to
733 : * avoid generating unsupportable addresses.
734 : */
735 0 : mask = (dma_addr_t)mask;
736 :
737 0 : if (!dev->dma_mask || !dma_supported(dev, mask))
738 : return -EIO;
739 :
740 : arch_dma_set_mask(dev, mask);
741 0 : *dev->dma_mask = mask;
742 0 : return 0;
743 : }
744 : EXPORT_SYMBOL(dma_set_mask);
745 :
746 0 : int dma_set_coherent_mask(struct device *dev, u64 mask)
747 : {
748 : /*
749 : * Truncate the mask to the actually supported dma_addr_t width to
750 : * avoid generating unsupportable addresses.
751 : */
752 0 : mask = (dma_addr_t)mask;
753 :
754 0 : if (!dma_supported(dev, mask))
755 : return -EIO;
756 :
757 0 : dev->coherent_dma_mask = mask;
758 0 : return 0;
759 : }
760 : EXPORT_SYMBOL(dma_set_coherent_mask);
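
/*
 * Illustrative sketch of how a driver typically sets both masks during
 * probe; dma_set_mask_and_coherent() from <linux/dma-mapping.h> wraps the
 * two functions above, with a 32-bit fallback if the wider mask is refused.
 */
static inline int foo_set_masks_example(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}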
761 :
762 0 : size_t dma_max_mapping_size(struct device *dev)
763 : {
764 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
765 0 : size_t size = SIZE_MAX;
766 :
767 0 : if (dma_map_direct(dev, ops))
768 0 : size = dma_direct_max_mapping_size(dev);
769 : else if (ops && ops->max_mapping_size)
770 : size = ops->max_mapping_size(dev);
771 :
772 0 : return size;
773 : }
774 : EXPORT_SYMBOL_GPL(dma_max_mapping_size);
775 :
776 0 : bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
777 : {
778 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
779 :
780 0 : if (dma_map_direct(dev, ops))
781 0 : return dma_direct_need_sync(dev, dma_addr);
782 : return ops->sync_single_for_cpu || ops->sync_single_for_device;
783 : }
784 : EXPORT_SYMBOL_GPL(dma_need_sync);
785 :
786 0 : unsigned long dma_get_merge_boundary(struct device *dev)
787 : {
788 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
789 :
790 : if (!ops || !ops->get_merge_boundary)
791 : return 0; /* can't merge */
792 :
793 : return ops->get_merge_boundary(dev);
794 : }
795 : EXPORT_SYMBOL_GPL(dma_get_merge_boundary);