// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

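/*
 * Translate a CPU physical address into the DMA address seen by @dev.
 * Devices that are forced to use unencrypted memory (e.g. under memory
 * encryption schemes such as AMD SME) get the unencrypted alias; everyone
 * else goes through the regular phys_to_dma() translation, including any
 * bus offset.
 */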
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

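/*
 * Report the DMA mask a device needs to directly address all of memory:
 * the mask covering the DMA address of the last physical page, rounded up
 * so that all bits below the highest set bit are included.
 */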
u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first. If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

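/*
 * Check that the memory at @phys .. @phys + @size - 1 translates to a DMA
 * address the device can actually reach, i.e. one that fits within both
 * the coherent DMA mask and the bus DMA limit.
 */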
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

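/*
 * For devices that require unencrypted DMA buffers, flip the kernel mapping
 * of the buffer between decrypted and encrypted.  Both helpers are no-ops
 * when the device does not force unencrypted DMA.
 */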
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
		size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}

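/*
 * Allocate the backing pages for a DMA buffer.  Restricted DMA devices
 * allocate from their swiotlb pool; everyone else first tries the
 * contiguous (CMA) allocator, then the page allocator, retrying with
 * GFP_DMA32 and finally GFP_DMA when the returned pages turn out not to be
 * addressable by the device.
 */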
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_mask;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_mask);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}

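/*
 * dma_direct_alloc - allocate a coherent DMA buffer without an IOMMU.
 *
 * Drivers do not call this directly; they go through the generic DMA API,
 * which dispatches here when dma-direct is in use.  An illustrative caller
 * (sketch only, not part of this file) looks roughly like:
 *
 *	dma_addr_t dma_handle;
 *	void *cpu = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	...			the device uses dma_handle, the CPU uses cpu
 *	dma_free_coherent(dev, size, cpu, dma_handle);
 *
 * Depending on the device and kernel configuration the buffer may come
 * from CMA, the page allocator, the atomic pools or the swiotlb pool, and
 * may be remapped, set uncached, or decrypted before it is returned.
 */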
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		/*
		 * Fall back to the arch handler if it exists. This should
		 * eventually go away.
		 */
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
		    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
		    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise remap if the architecture is asking for it. But
		 * given that remapping memory is a blocking operation we'll
		 * instead have to dip into the atomic pools.
		 */
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (remap) {
			if (dma_direct_use_pool(dev, gfp))
				return dma_direct_alloc_from_pool(dev, size,
						dma_handle, gfp);
		} else {
			if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
				return NULL;
			set_uncached = true;
		}
	}

	/*
	 * Decrypting memory may block, so allocate the memory from the atomic
	 * pools if we can't block.
	 */
	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
	if (!page)
		return NULL;

	/*
	 * dma_alloc_contiguous can return highmem pages depending on a
	 * combination of the cma= arguments and per-arch setup. These need to
	 * be remapped to return a kernel virtual address.
	 */
	if (PageHighMem(page)) {
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		if (force_dma_unencrypted(dev))
			prot = pgprot_decrypted(prot);

		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size, prot,
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}

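/*
 * Free a buffer obtained from dma_direct_alloc(), undoing whatever the
 * allocation side did: unmap a remapped buffer, clear an uncached alias,
 * re-encrypt the memory, and hand the pages back to their origin (global
 * pool, atomic pool, CMA, swiotlb pool or the page allocator).
 */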
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		if (dma_set_encrypted(dev, cpu_addr, 1 << page_order))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

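/*
 * Page-based allocation backend used by dma_alloc_pages() when dma-direct
 * is in use: it always returns directly mapped, zeroed pages with a kernel
 * address, so highmem pages from CMA are rejected rather than remapped.
 */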
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp);
	if (!page)
		return NULL;
	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		goto out_free_pages;
	}

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size))
		goto out_free_pages;
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	unsigned int page_order = get_order(size);
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (dma_set_encrypted(dev, vaddr, 1 << page_order))
		return;
	__dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_device(dev, paddr, sg->length,
					dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
					dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
				attrs);
}
#endif

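/*
 * Map a scatterlist for DMA by mapping each segment individually (bouncing
 * through swiotlb and syncing caches as needed in dma_direct_map_page()),
 * unwinding all already-mapped segments on failure.
 */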
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
}

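/*
 * Map a physical MMIO resource for DMA.  There is no struct page and no
 * swiotlb bouncing here; the address is only checked against the device's
 * DMA mask and bus limit.
 */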
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

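/*
 * Map a coherent buffer into user space by remapping the underlying pfns,
 * after first offering the request to any per-device or global coherent
 * memory area and applying the appropriate page protection (uncached or
 * decrypted as needed).
 */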
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	if (force_dma_unencrypted(dev))
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

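/*
 * Decide whether dma-direct can satisfy a given DMA mask for this device,
 * i.e. whether all memory the device may be handed is directly addressable
 * under that mask.
 */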
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32. If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the allocated memory.
 * @cpu_start:	beginning of memory region covered by this offset.
 * @dma_start:	beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].offset = offset;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}