Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : #include <linux/kernel.h>
3 : #include <linux/errno.h>
4 : #include <linux/err.h>
5 : #include <linux/spinlock.h>
6 :
7 : #include <linux/mm.h>
8 : #include <linux/memremap.h>
9 : #include <linux/pagemap.h>
10 : #include <linux/rmap.h>
11 : #include <linux/swap.h>
12 : #include <linux/swapops.h>
13 : #include <linux/secretmem.h>
14 :
15 : #include <linux/sched/signal.h>
16 : #include <linux/rwsem.h>
17 : #include <linux/hugetlb.h>
18 : #include <linux/migrate.h>
19 : #include <linux/mm_inline.h>
20 : #include <linux/sched/mm.h>
21 :
22 : #include <asm/mmu_context.h>
23 : #include <asm/tlbflush.h>
24 :
25 : #include "internal.h"
26 :
27 : struct follow_page_context {
28 : struct dev_pagemap *pgmap;
29 : unsigned int page_mask;
30 : };
31 :
32 : /*
33 : * Return the folio with ref appropriately incremented,
34 : * or NULL if that failed.
35 : */
36 0 : static inline struct folio *try_get_folio(struct page *page, int refs)
37 : {
38 : struct folio *folio;
39 :
40 : retry:
41 0 : folio = page_folio(page);
42 0 : if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
43 : return NULL;
44 0 : if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
45 : return NULL;
46 :
47 : /*
48 : * At this point we have a stable reference to the folio; but it
49 : * could be that between calling page_folio() and the refcount
50 : * increment, the folio was split, in which case we'd end up
51 : * holding a reference on a folio that has nothing to do with the page
52 : * we were given anymore.
53 : * So now that the folio is stable, recheck that the page still
54 : * belongs to this folio.
55 : */
56 0 : if (unlikely(page_folio(page) != folio)) {
57 : folio_put_refs(folio, refs);
58 : goto retry;
59 : }
60 :
61 : return folio;
62 : }
63 :
64 : /**
65 : * try_grab_folio() - Attempt to get or pin a folio.
66 : * @page: pointer to page to be grabbed
67 : * @refs: the value to (effectively) add to the folio's refcount
68 : * @flags: gup flags: these are the FOLL_* flag values.
69 : *
70 : * "grab" names in this file mean, "look at flags to decide whether to use
71 : * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount."
72 : *
73 : * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
74 : * same time. (That's true throughout the get_user_pages*() and
75 : * pin_user_pages*() APIs.) Cases:
76 : *
77 : * FOLL_GET: folio's refcount will be incremented by @refs.
78 : *
79 : * FOLL_PIN on large folios: folio's refcount will be incremented by
80 : * @refs, and its compound_pincount will be incremented by @refs.
81 : *
82 : * FOLL_PIN on single-page folios: folio's refcount will be incremented by
83 : * @refs * GUP_PIN_COUNTING_BIAS.
84 : *
85 : * Return: The folio containing @page (with refcount appropriately
86 : * incremented) for success, or NULL upon failure. If neither FOLL_GET
87 : * nor FOLL_PIN was set, that's considered failure, and furthermore,
88 : * a likely bug in the caller, so a warning is also emitted.
89 : */
90 0 : struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
91 : {
92 0 : if (flags & FOLL_GET)
93 0 : return try_get_folio(page, refs);
94 0 : else if (flags & FOLL_PIN) {
95 : struct folio *folio;
96 :
97 : /*
98 : * Can't do the FOLL_LONGTERM + FOLL_PIN gup fast path if the page
99 : * is not in the right zone (not pinnable), so fail and let the
100 : * caller fall back to the slow path.
101 : */
102 0 : if (unlikely((flags & FOLL_LONGTERM) &&
103 : !is_pinnable_page(page)))
104 : return NULL;
105 :
106 : /*
107 : * CAUTION: Don't use compound_head() on the page before this
108 : * point, the result won't be stable.
109 : */
110 0 : folio = try_get_folio(page, refs);
111 0 : if (!folio)
112 : return NULL;
113 :
114 : /*
115 : * When pinning a large folio, use an exact count to track it.
116 : *
117 : * However, be sure to *also* increment the normal folio
118 : * refcount field at least once, so that the folio really
119 : * is pinned. That's why the refcount from the earlier
120 : * try_get_folio() is left intact.
121 : */
122 0 : if (folio_test_large(folio))
123 0 : atomic_add(refs, folio_pincount_ptr(folio));
124 : else
125 0 : folio_ref_add(folio,
126 0 : refs * (GUP_PIN_COUNTING_BIAS - 1));
127 0 : node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
128 :
129 0 : return folio;
130 : }
131 :
132 0 : WARN_ON_ONCE(1);
133 : return NULL;
134 : }
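A worked sketch of the accounting above (illustrative only, assuming GUP_PIN_COUNTING_BIAS is 1U << 10 == 1024 as defined in include/linux/mm.h):

/*
 * After try_grab_folio(page, refs, flags), roughly:
 *
 *   FOLL_GET, any folio:    folio refcount += refs
 *   FOLL_PIN, small folio:  folio refcount += refs * 1024
 *   FOLL_PIN, large folio:  folio refcount += refs, compound_pincount += refs
 *
 * gup_put_folio() below subtracts exactly the same amounts, so one
 * pin/unpin cycle on a small folio is refcount += 1024 then -= 1024.
 */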
135 :
136 0 : static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
137 : {
138 0 : if (flags & FOLL_PIN) {
139 0 : node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
140 0 : if (folio_test_large(folio))
141 0 : atomic_sub(refs, folio_pincount_ptr(folio));
142 : else
143 0 : refs *= GUP_PIN_COUNTING_BIAS;
144 : }
145 :
146 0 : folio_put_refs(folio, refs);
147 0 : }
148 :
149 : /**
150 : * try_grab_page() - elevate a page's refcount by a flag-dependent amount
151 : * @page: pointer to page to be grabbed
152 : * @flags: gup flags: these are the FOLL_* flag values.
153 : *
154 : * This might not do anything at all, depending on the flags argument.
155 : *
156 : * "grab" names in this file mean, "look at flags to decide whether to use
157 : * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
158 : *
159 : * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
160 : * time. Cases: please see the try_grab_folio() documentation, with
161 : * "refs=1".
162 : *
163 : * Return: true for success, or if no action was required (if neither FOLL_PIN
164 : * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
165 : * FOLL_PIN was set, but the page could not be grabbed.
166 : */
167 0 : bool __must_check try_grab_page(struct page *page, unsigned int flags)
168 : {
169 0 : struct folio *folio = page_folio(page);
170 :
171 0 : WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
172 0 : if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
173 : return false;
174 :
175 0 : if (flags & FOLL_GET)
176 : folio_ref_inc(folio);
177 0 : else if (flags & FOLL_PIN) {
178 : /*
179 : * Similar to try_grab_folio(): be sure to *also*
180 : * increment the normal page refcount field at least once,
181 : * so that the page really is pinned.
182 : */
183 0 : if (folio_test_large(folio)) {
184 0 : folio_ref_add(folio, 1);
185 0 : atomic_add(1, folio_pincount_ptr(folio));
186 : } else {
187 : folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
188 : }
189 :
190 0 : node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
191 : }
192 :
193 : return true;
194 : }
195 :
196 : /**
197 : * unpin_user_page() - release a dma-pinned page
198 : * @page: pointer to page to be released
199 : *
200 : * Pages that were pinned via pin_user_pages*() must be released via either
201 : * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
202 : * that such pages can be separately tracked and uniquely handled. In
203 : * particular, interactions with RDMA and filesystems need special handling.
204 : */
205 0 : void unpin_user_page(struct page *page)
206 : {
207 0 : gup_put_folio(page_folio(page), 1, FOLL_PIN);
208 0 : }
209 : EXPORT_SYMBOL(unpin_user_page);
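A hedged caller-side sketch of the pairing rule above (illustrative only: user_addr is a hypothetical user address, and the caller is assumed to hold the mmap_lock for read, which pin_user_pages() requires):

struct page *page;
long nr;

mmap_read_lock(current->mm);
nr = pin_user_pages(user_addr, 1, FOLL_WRITE, &page, NULL);
mmap_read_unlock(current->mm);
if (nr != 1)
	return nr < 0 ? nr : -EFAULT;

/* ... use the pinned page, e.g. as a DMA target ... */

unpin_user_page(page);	/* never put_page() a FOLL_PIN reference */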
210 :
211 0 : static inline struct folio *gup_folio_range_next(struct page *start,
212 : unsigned long npages, unsigned long i, unsigned int *ntails)
213 : {
214 0 : struct page *next = nth_page(start, i);
215 0 : struct folio *folio = page_folio(next);
216 0 : unsigned int nr = 1;
217 :
218 0 : if (folio_test_large(folio))
219 0 : nr = min_t(unsigned int, npages - i,
220 : folio_nr_pages(folio) - folio_page_idx(folio, next));
221 :
222 0 : *ntails = nr;
223 0 : return folio;
224 : }
225 :
226 0 : static inline struct folio *gup_folio_next(struct page **list,
227 : unsigned long npages, unsigned long i, unsigned int *ntails)
228 : {
229 0 : struct folio *folio = page_folio(list[i]);
230 : unsigned int nr;
231 :
232 0 : for (nr = i + 1; nr < npages; nr++) {
233 0 : if (page_folio(list[nr]) != folio)
234 : break;
235 : }
236 :
237 0 : *ntails = nr - i;
238 0 : return folio;
239 : }
240 :
241 : /**
242 : * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
243 : * @pages: array of pages to be maybe marked dirty, and definitely released.
244 : * @npages: number of pages in the @pages array.
245 : * @make_dirty: whether to mark the pages dirty
246 : *
247 : * "gup-pinned page" refers to a page that has had one of the get_user_pages()
248 : * variants called on that page.
249 : *
250 : * For each page in the @pages array, make that page (or its head page, if a
251 : * compound page) dirty, if @make_dirty is true, and if the page was previously
252 : * listed as clean. In any case, releases all pages using unpin_user_page(),
253 : * possibly via unpin_user_pages(), for the non-dirty case.
254 : *
255 : * Please see the unpin_user_page() documentation for details.
256 : *
257 : * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
258 : * required, then the caller should a) verify that this is really correct,
259 : * because _lock() is usually required, and b) hand code it:
260 : * set_page_dirty(), unpin_user_page().
261 : *
262 : */
263 0 : void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
264 : bool make_dirty)
265 : {
266 : unsigned long i;
267 : struct folio *folio;
268 : unsigned int nr;
269 :
270 0 : if (!make_dirty) {
271 0 : unpin_user_pages(pages, npages);
272 0 : return;
273 : }
274 :
275 0 : for (i = 0; i < npages; i += nr) {
276 0 : folio = gup_folio_next(pages, npages, i, &nr);
277 : /*
278 : * Checking PageDirty at this point may race with
279 : * clear_page_dirty_for_io(), but that's OK. Two key
280 : * cases:
281 : *
282 : * 1) This code sees the page as already dirty, so it
283 : * skips the call to set_page_dirty(). That could happen
284 : * because clear_page_dirty_for_io() called
285 : * page_mkclean(), followed by set_page_dirty().
286 : * However, now the page is going to get written back,
287 : * which meets the original intention of setting it
288 : * dirty, so all is well: clear_page_dirty_for_io() goes
289 : * on to call TestClearPageDirty(), and write the page
290 : * back.
291 : *
292 : * 2) This code sees the page as clean, so it calls
293 : * set_page_dirty(). The page stays dirty, despite being
294 : * written back, so it gets written back again in the
295 : * next writeback cycle. This is harmless.
296 : */
297 0 : if (!folio_test_dirty(folio)) {
298 0 : folio_lock(folio);
299 0 : folio_mark_dirty(folio);
300 0 : folio_unlock(folio);
301 : }
302 0 : gup_put_folio(folio, nr, FOLL_PIN);
303 : }
304 : }
305 : EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
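A minimal usage sketch, assuming a hypothetical driver already pinned @npages pages with pin_user_pages(..., FOLL_WRITE, ...) and a device has since written into them:

/* dirty and release in one call once the device is done writing */
unpin_user_pages_dirty_lock(pages, npages, true);

/* callers that only read from the pages would instead do: */
unpin_user_pages(pages, npages);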
306 :
307 : /**
308 : * unpin_user_page_range_dirty_lock() - release and optionally dirty
309 : * gup-pinned page range
310 : *
311 : * @page: the starting page of a range maybe marked dirty, and definitely released.
312 : * @npages: number of consecutive pages to release.
313 : * @make_dirty: whether to mark the pages dirty
314 : *
315 : * "gup-pinned page range" refers to a range of pages that has had one of the
316 : * pin_user_pages() variants called on that range.
317 : *
318 : * For the page range defined by [page .. page+npages-1], make that range (or
319 : * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
320 : * page range was previously listed as clean.
321 : *
322 : * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
323 : * required, then the caller should a) verify that this is really correct,
324 : * because _lock() is usually required, and b) hand code it:
325 : * set_page_dirty(), unpin_user_page().
326 : *
327 : */
328 0 : void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
329 : bool make_dirty)
330 : {
331 : unsigned long i;
332 : struct folio *folio;
333 : unsigned int nr;
334 :
335 0 : for (i = 0; i < npages; i += nr) {
336 0 : folio = gup_folio_range_next(page, npages, i, &nr);
337 0 : if (make_dirty && !folio_test_dirty(folio)) {
338 0 : folio_lock(folio);
339 0 : folio_mark_dirty(folio);
340 0 : folio_unlock(folio);
341 : }
342 0 : gup_put_folio(folio, nr, FOLL_PIN);
343 : }
344 0 : }
345 : EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
346 :
347 : /**
348 : * unpin_user_pages() - release an array of gup-pinned pages.
349 : * @pages: array of pages to be released.
350 : * @npages: number of pages in the @pages array.
351 : *
352 : * For each page in the @pages array, release the page using unpin_user_page().
353 : *
354 : * Please see the unpin_user_page() documentation for details.
355 : */
356 0 : void unpin_user_pages(struct page **pages, unsigned long npages)
357 : {
358 : unsigned long i;
359 : struct folio *folio;
360 : unsigned int nr;
361 :
362 : /*
363 : * If this WARN_ON() fires, then the system *might* be leaking pages (by
364 : * leaving them pinned), but probably not. More likely, gup/pup returned
365 : * a hard -ERRNO error to the caller, who erroneously passed it here.
366 : */
367 0 : if (WARN_ON(IS_ERR_VALUE(npages)))
368 0 : return;
369 :
370 0 : for (i = 0; i < npages; i += nr) {
371 0 : folio = gup_folio_next(pages, npages, i, &nr);
372 0 : gup_put_folio(folio, nr, FOLL_PIN);
373 : }
374 : }
375 : EXPORT_SYMBOL(unpin_user_pages);
376 :
377 : /*
378 : * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
379 : * lifecycle. Avoid setting the bit unless necessary, or it might cause write
380 : * cache bouncing on large SMP machines for concurrent pinned gups.
381 : */
382 : static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
383 : {
384 0 : if (!test_bit(MMF_HAS_PINNED, mm_flags))
385 : set_bit(MMF_HAS_PINNED, mm_flags);
386 : }
387 :
388 : #ifdef CONFIG_MMU
389 : static struct page *no_page_table(struct vm_area_struct *vma,
390 : unsigned int flags)
391 : {
392 : /*
393 : * When core dumping an enormous anonymous area that nobody
394 : * has touched so far, we don't want to allocate unnecessary pages or
395 : * page tables. Return error instead of NULL to skip handle_mm_fault,
396 : * then get_dump_page() will return NULL to leave a hole in the dump.
397 : * But we can only make this optimization where a hole would surely
398 : * be zero-filled if handle_mm_fault() actually did handle it.
399 : */
400 0 : if ((flags & FOLL_DUMP) &&
401 0 : (vma_is_anonymous(vma) || !vma->vm_ops->fault))
402 : return ERR_PTR(-EFAULT);
403 : return NULL;
404 : }
405 :
406 0 : static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
407 : pte_t *pte, unsigned int flags)
408 : {
409 0 : if (flags & FOLL_TOUCH) {
410 0 : pte_t entry = *pte;
411 :
412 0 : if (flags & FOLL_WRITE)
413 : entry = pte_mkdirty(entry);
414 0 : entry = pte_mkyoung(entry);
415 :
416 0 : if (!pte_same(*pte, entry)) {
417 0 : set_pte_at(vma->vm_mm, address, pte, entry);
418 : update_mmu_cache(vma, address, pte);
419 : }
420 : }
421 :
422 : /* Proper page table entry exists, but no corresponding struct page */
423 0 : return -EEXIST;
424 : }
425 :
426 : /*
427 : * FOLL_FORCE can write to even unwritable pte's, but only
428 : * after we've gone through a COW cycle and they are dirty.
429 : */
430 : static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
431 : {
432 0 : return pte_write(pte) ||
433 0 : ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
434 : }
435 :
436 0 : static struct page *follow_page_pte(struct vm_area_struct *vma,
437 : unsigned long address, pmd_t *pmd, unsigned int flags,
438 : struct dev_pagemap **pgmap)
439 : {
440 0 : struct mm_struct *mm = vma->vm_mm;
441 : struct page *page;
442 : spinlock_t *ptl;
443 : pte_t *ptep, pte;
444 : int ret;
445 :
446 : /* FOLL_GET and FOLL_PIN are mutually exclusive. */
447 0 : if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
448 : (FOLL_PIN | FOLL_GET)))
449 : return ERR_PTR(-EINVAL);
450 : retry:
451 0 : if (unlikely(pmd_bad(*pmd)))
452 : return no_page_table(vma, flags);
453 :
454 0 : ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
455 0 : pte = *ptep;
456 0 : if (!pte_present(pte)) {
457 : swp_entry_t entry;
458 : /*
459 : * KSM's break_ksm() relies upon recognizing a ksm page
460 : * even while it is being migrated, so for that case we
461 : * need migration_entry_wait().
462 : */
463 0 : if (likely(!(flags & FOLL_MIGRATION)))
464 : goto no_page;
465 0 : if (pte_none(pte))
466 : goto no_page;
467 0 : entry = pte_to_swp_entry(pte);
468 0 : if (!is_migration_entry(entry))
469 : goto no_page;
470 0 : pte_unmap_unlock(ptep, ptl);
471 0 : migration_entry_wait(mm, pmd, address);
472 0 : goto retry;
473 : }
474 : if ((flags & FOLL_NUMA) && pte_protnone(pte))
475 : goto no_page;
476 0 : if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
477 0 : pte_unmap_unlock(ptep, ptl);
478 : return NULL;
479 : }
480 :
481 0 : page = vm_normal_page(vma, address, pte);
482 : if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
483 : /*
484 : * Only return device mapping pages in the FOLL_GET or FOLL_PIN
485 : * case since they are only valid while holding the pgmap
486 : * reference.
487 : */
488 : *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
489 : if (*pgmap)
490 : page = pte_page(pte);
491 : else
492 : goto no_page;
493 0 : } else if (unlikely(!page)) {
494 0 : if (flags & FOLL_DUMP) {
495 : /* Avoid special (like zero) pages in core dumps */
496 : page = ERR_PTR(-EFAULT);
497 : goto out;
498 : }
499 :
500 0 : if (is_zero_pfn(pte_pfn(pte))) {
501 0 : page = pte_page(pte);
502 : } else {
503 0 : ret = follow_pfn_pte(vma, address, ptep, flags);
504 0 : page = ERR_PTR(ret);
505 : goto out;
506 : }
507 : }
508 :
509 : /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
510 0 : if (unlikely(!try_grab_page(page, flags))) {
511 : page = ERR_PTR(-ENOMEM);
512 : goto out;
513 : }
514 : /*
515 : * We need to make the page accessible if and only if we are going
516 : * to access its content (the FOLL_PIN case). Please see
517 : * Documentation/core-api/pin_user_pages.rst for details.
518 : */
519 : if (flags & FOLL_PIN) {
520 : ret = arch_make_page_accessible(page);
521 : if (ret) {
522 : unpin_user_page(page);
523 : page = ERR_PTR(ret);
524 : goto out;
525 : }
526 : }
527 0 : if (flags & FOLL_TOUCH) {
528 0 : if ((flags & FOLL_WRITE) &&
529 0 : !pte_dirty(pte) && !PageDirty(page))
530 0 : set_page_dirty(page);
531 : /*
532 : * pte_mkyoung() would be more correct here, but atomic care
533 : * is needed to avoid losing the dirty bit: it is easier to use
534 : * mark_page_accessed().
535 : */
536 0 : mark_page_accessed(page);
537 : }
538 : out:
539 0 : pte_unmap_unlock(ptep, ptl);
540 : return page;
541 : no_page:
542 0 : pte_unmap_unlock(ptep, ptl);
543 0 : if (!pte_none(pte))
544 : return NULL;
545 : return no_page_table(vma, flags);
546 : }
547 :
548 0 : static struct page *follow_pmd_mask(struct vm_area_struct *vma,
549 : unsigned long address, pud_t *pudp,
550 : unsigned int flags,
551 : struct follow_page_context *ctx)
552 : {
553 : pmd_t *pmd, pmdval;
554 : spinlock_t *ptl;
555 : struct page *page;
556 0 : struct mm_struct *mm = vma->vm_mm;
557 :
558 0 : pmd = pmd_offset(pudp, address);
559 : /*
560 : * The READ_ONCE() will stabilize the pmdval in a register or
561 : * on the stack so that it will stop changing under the code.
562 : */
563 0 : pmdval = READ_ONCE(*pmd);
564 0 : if (pmd_none(pmdval))
565 : return no_page_table(vma, flags);
566 : if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
567 : page = follow_huge_pmd(mm, address, pmd, flags);
568 : if (page)
569 : return page;
570 : return no_page_table(vma, flags);
571 : }
572 : if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
573 : page = follow_huge_pd(vma, address,
574 : __hugepd(pmd_val(pmdval)), flags,
575 : PMD_SHIFT);
576 : if (page)
577 : return page;
578 : return no_page_table(vma, flags);
579 : }
580 : retry:
581 0 : if (!pmd_present(pmdval)) {
582 : /*
583 : * Should never reach here, if thp migration is not supported;
584 : * Otherwise, it must be a thp migration entry.
585 : */
586 : VM_BUG_ON(!thp_migration_supported() ||
587 : !is_pmd_migration_entry(pmdval));
588 :
589 0 : if (likely(!(flags & FOLL_MIGRATION)))
590 : return no_page_table(vma, flags);
591 :
592 0 : pmd_migration_entry_wait(mm, pmd);
593 0 : pmdval = READ_ONCE(*pmd);
594 : /*
595 : * MADV_DONTNEED may convert the pmd to null because
596 : * mmap_lock is held in read mode
597 : */
598 0 : if (pmd_none(pmdval))
599 : return no_page_table(vma, flags);
600 : goto retry;
601 : }
602 0 : if (pmd_devmap(pmdval)) {
603 : ptl = pmd_lock(mm, pmd);
604 : page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
605 : spin_unlock(ptl);
606 : if (page)
607 : return page;
608 : }
609 0 : if (likely(!pmd_trans_huge(pmdval)))
610 0 : return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
611 :
612 : if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
613 : return no_page_table(vma, flags);
614 :
615 : retry_locked:
616 : ptl = pmd_lock(mm, pmd);
617 : if (unlikely(pmd_none(*pmd))) {
618 : spin_unlock(ptl);
619 : return no_page_table(vma, flags);
620 : }
621 : if (unlikely(!pmd_present(*pmd))) {
622 : spin_unlock(ptl);
623 : if (likely(!(flags & FOLL_MIGRATION)))
624 : return no_page_table(vma, flags);
625 : pmd_migration_entry_wait(mm, pmd);
626 : goto retry_locked;
627 : }
628 : if (unlikely(!pmd_trans_huge(*pmd))) {
629 : spin_unlock(ptl);
630 : return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
631 : }
632 : if (flags & FOLL_SPLIT_PMD) {
633 : int ret;
634 : page = pmd_page(*pmd);
635 : if (is_huge_zero_page(page)) {
636 : spin_unlock(ptl);
637 : ret = 0;
638 : split_huge_pmd(vma, pmd, address);
639 : if (pmd_trans_unstable(pmd))
640 : ret = -EBUSY;
641 : } else {
642 : spin_unlock(ptl);
643 : split_huge_pmd(vma, pmd, address);
644 : ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
645 : }
646 :
647 : return ret ? ERR_PTR(ret) :
648 : follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
649 : }
650 : page = follow_trans_huge_pmd(vma, address, pmd, flags);
651 : spin_unlock(ptl);
652 : ctx->page_mask = HPAGE_PMD_NR - 1;
653 : return page;
654 : }
655 :
656 0 : static struct page *follow_pud_mask(struct vm_area_struct *vma,
657 : unsigned long address, p4d_t *p4dp,
658 : unsigned int flags,
659 : struct follow_page_context *ctx)
660 : {
661 : pud_t *pud;
662 : spinlock_t *ptl;
663 : struct page *page;
664 0 : struct mm_struct *mm = vma->vm_mm;
665 :
666 0 : pud = pud_offset(p4dp, address);
667 0 : if (pud_none(*pud))
668 : return no_page_table(vma, flags);
669 : if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
670 : page = follow_huge_pud(mm, address, pud, flags);
671 : if (page)
672 : return page;
673 : return no_page_table(vma, flags);
674 : }
675 : if (is_hugepd(__hugepd(pud_val(*pud)))) {
676 : page = follow_huge_pd(vma, address,
677 : __hugepd(pud_val(*pud)), flags,
678 : PUD_SHIFT);
679 : if (page)
680 : return page;
681 : return no_page_table(vma, flags);
682 : }
683 : if (pud_devmap(*pud)) {
684 : ptl = pud_lock(mm, pud);
685 : page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
686 : spin_unlock(ptl);
687 : if (page)
688 : return page;
689 : }
690 0 : if (unlikely(pud_bad(*pud)))
691 : return no_page_table(vma, flags);
692 :
693 0 : return follow_pmd_mask(vma, address, pud, flags, ctx);
694 : }
695 :
696 : static struct page *follow_p4d_mask(struct vm_area_struct *vma,
697 : unsigned long address, pgd_t *pgdp,
698 : unsigned int flags,
699 : struct follow_page_context *ctx)
700 : {
701 : p4d_t *p4d;
702 : struct page *page;
703 :
704 0 : p4d = p4d_offset(pgdp, address);
705 : if (p4d_none(*p4d))
706 : return no_page_table(vma, flags);
707 : BUILD_BUG_ON(p4d_huge(*p4d));
708 : if (unlikely(p4d_bad(*p4d)))
709 : return no_page_table(vma, flags);
710 :
711 : if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
712 : page = follow_huge_pd(vma, address,
713 : __hugepd(p4d_val(*p4d)), flags,
714 : P4D_SHIFT);
715 : if (page)
716 : return page;
717 : return no_page_table(vma, flags);
718 : }
719 0 : return follow_pud_mask(vma, address, p4d, flags, ctx);
720 : }
721 :
722 : /**
723 : * follow_page_mask - look up a page descriptor from a user-virtual address
724 : * @vma: vm_area_struct mapping @address
725 : * @address: virtual address to look up
726 : * @flags: flags modifying lookup behaviour
727 : * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
728 : * pointer to output page_mask
729 : *
730 : * @flags can have FOLL_ flags set, defined in <linux/mm.h>
731 : *
732 : * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
733 : * the device's dev_pagemap metadata to avoid repeating expensive lookups.
734 : *
735 : * On output, the @ctx->page_mask is set according to the size of the page.
736 : *
737 : * Return: the mapped (struct page *), %NULL if no mapping exists, or
738 : * an error pointer if there is a mapping to something not represented
739 : * by a page descriptor (see also vm_normal_page()).
740 : */
741 : static struct page *follow_page_mask(struct vm_area_struct *vma,
742 : unsigned long address, unsigned int flags,
743 : struct follow_page_context *ctx)
744 : {
745 : pgd_t *pgd;
746 : struct page *page;
747 0 : struct mm_struct *mm = vma->vm_mm;
748 :
749 0 : ctx->page_mask = 0;
750 :
751 : /* make this handle hugepd */
752 0 : page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
753 0 : if (!IS_ERR(page)) {
754 : WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
755 : return page;
756 : }
757 :
758 0 : pgd = pgd_offset(mm, address);
759 :
760 : if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
761 : return no_page_table(vma, flags);
762 :
763 : if (pgd_huge(*pgd)) {
764 : page = follow_huge_pgd(mm, address, pgd, flags);
765 : if (page)
766 : return page;
767 : return no_page_table(vma, flags);
768 : }
769 : if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
770 : page = follow_huge_pd(vma, address,
771 : __hugepd(pgd_val(*pgd)), flags,
772 : PGDIR_SHIFT);
773 : if (page)
774 : return page;
775 : return no_page_table(vma, flags);
776 : }
777 :
778 0 : return follow_p4d_mask(vma, address, pgd, flags, ctx);
779 : }
780 :
781 0 : struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
782 : unsigned int foll_flags)
783 : {
784 0 : struct follow_page_context ctx = { NULL };
785 : struct page *page;
786 :
787 0 : if (vma_is_secretmem(vma))
788 : return NULL;
789 :
790 0 : page = follow_page_mask(vma, address, foll_flags, &ctx);
791 : if (ctx.pgmap)
792 : put_dev_pagemap(ctx.pgmap);
793 : return page;
794 : }
795 :
796 : static int get_gate_page(struct mm_struct *mm, unsigned long address,
797 : unsigned int gup_flags, struct vm_area_struct **vma,
798 : struct page **page)
799 : {
800 : pgd_t *pgd;
801 : p4d_t *p4d;
802 : pud_t *pud;
803 : pmd_t *pmd;
804 : pte_t *pte;
805 : int ret = -EFAULT;
806 :
807 : /* user gate pages are read-only */
808 : if (gup_flags & FOLL_WRITE)
809 : return -EFAULT;
810 : if (address > TASK_SIZE)
811 : pgd = pgd_offset_k(address);
812 : else
813 : pgd = pgd_offset_gate(mm, address);
814 : if (pgd_none(*pgd))
815 : return -EFAULT;
816 : p4d = p4d_offset(pgd, address);
817 : if (p4d_none(*p4d))
818 : return -EFAULT;
819 : pud = pud_offset(p4d, address);
820 : if (pud_none(*pud))
821 : return -EFAULT;
822 : pmd = pmd_offset(pud, address);
823 : if (!pmd_present(*pmd))
824 : return -EFAULT;
825 : VM_BUG_ON(pmd_trans_huge(*pmd));
826 : pte = pte_offset_map(pmd, address);
827 : if (pte_none(*pte))
828 : goto unmap;
829 : *vma = get_gate_vma(mm);
830 : if (!page)
831 : goto out;
832 : *page = vm_normal_page(*vma, address, *pte);
833 : if (!*page) {
834 : if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
835 : goto unmap;
836 : *page = pte_page(*pte);
837 : }
838 : if (unlikely(!try_grab_page(*page, gup_flags))) {
839 : ret = -ENOMEM;
840 : goto unmap;
841 : }
842 : out:
843 : ret = 0;
844 : unmap:
845 : pte_unmap(pte);
846 : return ret;
847 : }
848 :
849 : /*
850 : * mmap_lock must be held on entry. If @locked != NULL and *@flags
851 : * does not include FOLL_NOWAIT, the mmap_lock may be released. If it
852 : * is, *@locked will be set to 0 and -EBUSY returned.
853 : */
854 0 : static int faultin_page(struct vm_area_struct *vma,
855 : unsigned long address, unsigned int *flags, int *locked)
856 : {
857 0 : unsigned int fault_flags = 0;
858 : vm_fault_t ret;
859 :
860 0 : if (*flags & FOLL_NOFAULT)
861 : return -EFAULT;
862 0 : if (*flags & FOLL_WRITE)
863 0 : fault_flags |= FAULT_FLAG_WRITE;
864 0 : if (*flags & FOLL_REMOTE)
865 0 : fault_flags |= FAULT_FLAG_REMOTE;
866 0 : if (locked)
867 0 : fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
868 0 : if (*flags & FOLL_NOWAIT)
869 0 : fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
870 0 : if (*flags & FOLL_TRIED) {
871 : /*
872 : * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
873 : * can co-exist
874 : */
875 0 : fault_flags |= FAULT_FLAG_TRIED;
876 : }
877 :
878 0 : ret = handle_mm_fault(vma, address, fault_flags, NULL);
879 0 : if (ret & VM_FAULT_ERROR) {
880 0 : int err = vm_fault_to_errno(ret, *flags);
881 :
882 0 : if (err)
883 : return err;
884 0 : BUG();
885 : }
886 :
887 0 : if (ret & VM_FAULT_RETRY) {
888 0 : if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
889 0 : *locked = 0;
890 : return -EBUSY;
891 : }
892 :
893 : /*
894 : * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
895 : * necessary, even if maybe_mkwrite decided not to set pte_write. We
896 : * can thus safely do subsequent page lookups as if they were reads.
897 : * But only do so when looping for pte_write is futile: in some cases
898 : * userspace may also be wanting to write to the gotten user page,
899 : * which a read fault here might prevent (a readonly page might get
900 : * reCOWed by userspace write).
901 : */
902 0 : if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
903 0 : *flags |= FOLL_COW;
904 : return 0;
905 : }
906 :
907 0 : static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
908 : {
909 0 : vm_flags_t vm_flags = vma->vm_flags;
910 0 : int write = (gup_flags & FOLL_WRITE);
911 0 : int foreign = (gup_flags & FOLL_REMOTE);
912 :
913 0 : if (vm_flags & (VM_IO | VM_PFNMAP))
914 : return -EFAULT;
915 :
916 0 : if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
917 : return -EFAULT;
918 :
919 : if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
920 : return -EOPNOTSUPP;
921 :
922 0 : if (vma_is_secretmem(vma))
923 : return -EFAULT;
924 :
925 0 : if (write) {
926 0 : if (!(vm_flags & VM_WRITE)) {
927 0 : if (!(gup_flags & FOLL_FORCE))
928 : return -EFAULT;
929 : /*
930 : * We used to let the write,force case do COW in a
931 : * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
932 : * set a breakpoint in a read-only mapping of an
933 : * executable, without corrupting the file (yet only
934 : * when that file had been opened for writing!).
935 : * Anon pages in shared mappings are surprising: now
936 : * just reject it.
937 : */
938 0 : if (!is_cow_mapping(vm_flags))
939 : return -EFAULT;
940 : }
941 0 : } else if (!(vm_flags & VM_READ)) {
942 0 : if (!(gup_flags & FOLL_FORCE))
943 : return -EFAULT;
944 : /*
945 : * Is there actually any vma we can reach here which does not
946 : * have VM_MAYREAD set?
947 : */
948 0 : if (!(vm_flags & VM_MAYREAD))
949 : return -EFAULT;
950 : }
951 : /*
952 : * gups are always data accesses, not instruction
953 : * fetches, so execute=false here
954 : */
955 0 : if (!arch_vma_access_permitted(vma, write, false, foreign))
956 : return -EFAULT;
957 0 : return 0;
958 : }
959 :
960 : /**
961 : * __get_user_pages() - pin user pages in memory
962 : * @mm: mm_struct of target mm
963 : * @start: starting user address
964 : * @nr_pages: number of pages from start to pin
965 : * @gup_flags: flags modifying pin behaviour
966 : * @pages: array that receives pointers to the pages pinned.
967 : * Should be at least nr_pages long. Or NULL, if caller
968 : * only intends to ensure the pages are faulted in.
969 : * @vmas: array of pointers to vmas corresponding to each page.
970 : * Or NULL if the caller does not require them.
971 : * @locked: whether we're still with the mmap_lock held
972 : *
973 : * Returns either number of pages pinned (which may be less than the
974 : * number requested), or an error. Details about the return value:
975 : *
976 : * -- If nr_pages is 0, returns 0.
977 : * -- If nr_pages is >0, but no pages were pinned, returns -errno.
978 : * -- If nr_pages is >0, and some pages were pinned, returns the number of
979 : * pages pinned. Again, this may be less than nr_pages.
980 : * -- 0 return value is possible when the fault would need to be retried.
981 : *
982 : * The caller is responsible for releasing returned @pages, via put_page().
983 : *
984 : * @vmas are valid only as long as mmap_lock is held.
985 : *
986 : * Must be called with mmap_lock held. It may be released. See below.
987 : *
988 : * __get_user_pages walks a process's page tables and takes a reference to
989 : * each struct page that each user address corresponds to at a given
990 : * instant. That is, it takes the page that would be accessed if a user
991 : * thread accesses the given user virtual address at that instant.
992 : *
993 : * This does not guarantee that the page exists in the user mappings when
994 : * __get_user_pages returns, and there may even be a completely different
995 : * page there in some cases (eg. if mmapped pagecache has been invalidated
996 : * and subsequently re faulted). However it does guarantee that the page
997 : * won't be freed completely. And mostly callers simply care that the page
998 : * contains data that was valid *at some point in time*. Typically, an IO
999 : * or similar operation cannot guarantee anything stronger anyway because
1000 : * locks can't be held over the syscall boundary.
1001 : *
1002 : * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1003 : * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1004 : * appropriate) must be called after the page is finished with, and
1005 : * before put_page is called.
1006 : *
1007 : * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
1008 : * released by an up_read(). That can happen if @gup_flags does not
1009 : * have FOLL_NOWAIT.
1010 : *
1011 : * A caller using such a combination of @locked and @gup_flags
1012 : * must therefore hold the mmap_lock for reading only, and recognize
1013 : * when it's been released. Otherwise, it must be held for either
1014 : * reading or writing and will not be released.
1015 : *
1016 : * In most cases, get_user_pages or get_user_pages_fast should be used
1017 : * instead of __get_user_pages. __get_user_pages should be used only if
1018 : * you need some special @gup_flags.
1019 : */
1020 0 : static long __get_user_pages(struct mm_struct *mm,
1021 : unsigned long start, unsigned long nr_pages,
1022 : unsigned int gup_flags, struct page **pages,
1023 : struct vm_area_struct **vmas, int *locked)
1024 : {
1025 0 : long ret = 0, i = 0;
1026 0 : struct vm_area_struct *vma = NULL;
1027 0 : struct follow_page_context ctx = { NULL };
1028 :
1029 0 : if (!nr_pages)
1030 : return 0;
1031 :
1032 0 : start = untagged_addr(start);
1033 :
1034 : VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
1035 :
1036 : /*
1037 : * If FOLL_FORCE is set then do not force a full fault as the hinting
1038 : * fault information is unrelated to the reference behaviour of a task
1039 : * using the address space
1040 : */
1041 0 : if (!(gup_flags & FOLL_FORCE))
1042 0 : gup_flags |= FOLL_NUMA;
1043 :
1044 : do {
1045 : struct page *page;
1046 0 : unsigned int foll_flags = gup_flags;
1047 : unsigned int page_increm;
1048 :
1049 : /* first iteration or cross vma bound */
1050 0 : if (!vma || start >= vma->vm_end) {
1051 0 : vma = find_extend_vma(mm, start);
1052 : if (!vma && in_gate_area(mm, start)) {
1053 : ret = get_gate_page(mm, start & PAGE_MASK,
1054 : gup_flags, &vma,
1055 : pages ? &pages[i] : NULL);
1056 : if (ret)
1057 : goto out;
1058 : ctx.page_mask = 0;
1059 : goto next_page;
1060 : }
1061 :
1062 0 : if (!vma) {
1063 : ret = -EFAULT;
1064 : goto out;
1065 : }
1066 0 : ret = check_vma_flags(vma, gup_flags);
1067 0 : if (ret)
1068 : goto out;
1069 :
1070 : if (is_vm_hugetlb_page(vma)) {
1071 : i = follow_hugetlb_page(mm, vma, pages, vmas,
1072 : &start, &nr_pages, i,
1073 : gup_flags, locked);
1074 : if (locked && *locked == 0) {
1075 : /*
1076 : * We've got a VM_FAULT_RETRY
1077 : * and we've lost mmap_lock.
1078 : * We must stop here.
1079 : */
1080 : BUG_ON(gup_flags & FOLL_NOWAIT);
1081 : goto out;
1082 : }
1083 : continue;
1084 : }
1085 : }
1086 : retry:
1087 : /*
1088 : * If we have a pending SIGKILL, don't keep faulting pages and
1089 : * potentially allocating memory.
1090 : */
1091 0 : if (fatal_signal_pending(current)) {
1092 : ret = -EINTR;
1093 : goto out;
1094 : }
1095 0 : cond_resched();
1096 :
1097 0 : page = follow_page_mask(vma, start, foll_flags, &ctx);
1098 0 : if (!page) {
1099 0 : ret = faultin_page(vma, start, &foll_flags, locked);
1100 0 : switch (ret) {
1101 : case 0:
1102 : goto retry;
1103 : case -EBUSY:
1104 0 : ret = 0;
1105 : fallthrough;
1106 : case -EFAULT:
1107 : case -ENOMEM:
1108 : case -EHWPOISON:
1109 : goto out;
1110 : }
1111 0 : BUG();
1112 0 : } else if (PTR_ERR(page) == -EEXIST) {
1113 : /*
1114 : * Proper page table entry exists, but no corresponding
1115 : * struct page. If the caller expects **pages to be
1116 : * filled in, bail out now, because that can't be done
1117 : * for this page.
1118 : */
1119 0 : if (pages) {
1120 : ret = PTR_ERR(page);
1121 : goto out;
1122 : }
1123 :
1124 : goto next_page;
1125 0 : } else if (IS_ERR(page)) {
1126 : ret = PTR_ERR(page);
1127 : goto out;
1128 : }
1129 0 : if (pages) {
1130 0 : pages[i] = page;
1131 0 : flush_anon_page(vma, page, start);
1132 : flush_dcache_page(page);
1133 0 : ctx.page_mask = 0;
1134 : }
1135 : next_page:
1136 0 : if (vmas) {
1137 0 : vmas[i] = vma;
1138 0 : ctx.page_mask = 0;
1139 : }
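/*
 * page_mask is 0 for normal pages and HPAGE_PMD_NR - 1 for a PMD-sized
 * THP, so page_increm below advances to the end of the current huge
 * page: e.g. with a 512-page THP and start sitting on sub-page 5,
 * ~(start >> PAGE_SHIFT) & 511 == 506 and page_increm == 507,
 * covering sub-pages 5..511 in one step.
 */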
1140 0 : page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
1141 0 : if (page_increm > nr_pages)
1142 0 : page_increm = nr_pages;
1143 0 : i += page_increm;
1144 0 : start += page_increm * PAGE_SIZE;
1145 0 : nr_pages -= page_increm;
1146 0 : } while (nr_pages);
1147 : out:
1148 : if (ctx.pgmap)
1149 : put_dev_pagemap(ctx.pgmap);
1150 0 : return i ? i : ret;
1151 : }
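Per the kernel-doc note above that most callers should use the higher-level helpers instead of __get_user_pages(), a hedged sketch of the common pattern (user_addr and the page count are hypothetical):

struct page *pages[4];
int nr, i;

/* no mmap_lock needed; the *_fast variants take it themselves if they must */
nr = get_user_pages_fast(user_addr, 4, FOLL_WRITE, pages);
if (nr <= 0)
	return nr ? nr : -EFAULT;

/* ... access the pages ... */

for (i = 0; i < nr; i++)
	put_page(pages[i]);	/* FOLL_GET references are dropped with put_page() */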
1152 :
1153 : static bool vma_permits_fault(struct vm_area_struct *vma,
1154 : unsigned int fault_flags)
1155 : {
1156 0 : bool write = !!(fault_flags & FAULT_FLAG_WRITE);
1157 0 : bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
1158 0 : vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
1159 :
1160 0 : if (!(vm_flags & vma->vm_flags))
1161 : return false;
1162 :
1163 : /*
1164 : * The architecture might have a hardware protection
1165 : * mechanism other than read/write that can deny access.
1166 : *
1167 : * gup always represents data access, not instruction
1168 : * fetches, so execute=false here:
1169 : */
1170 0 : if (!arch_vma_access_permitted(vma, write, false, foreign))
1171 : return false;
1172 :
1173 : return true;
1174 : }
1175 :
1176 : /**
1177 : * fixup_user_fault() - manually resolve a user page fault
1178 : * @mm: mm_struct of target mm
1179 : * @address: user address
1180 : * @fault_flags: flags to pass down to handle_mm_fault()
1181 : * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller
1182 : * does not allow retry. If NULL, the caller must guarantee
1183 : * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
1184 : *
1185 : * This is meant to be called in the specific scenario where, for locking reasons,
1186 : * we try to access user memory in atomic context (within a pagefault_disable()
1187 : * section), that access returns -EFAULT, and we want to resolve the user fault
1188 : * before trying again.
1189 : *
1190 : * Typically this is meant to be used by the futex code.
1191 : *
1192 : * The main difference with get_user_pages() is that this function will
1193 : * unconditionally call handle_mm_fault() which will in turn perform all the
1194 : * necessary SW fixup of the dirty and young bits in the PTE, while
1195 : * get_user_pages() only guarantees to update these in the struct page.
1196 : *
1197 : * This is important for some architectures where those bits also gate the
1198 : * access permission to the page because they are maintained in software. On
1199 : * such architectures, gup() will not be enough to make a subsequent access
1200 : * succeed.
1201 : *
1202 : * This function will not return with an unlocked mmap_lock. So it does not have
1203 : * the same semantics wrt the @mm->mmap_lock as filemap_fault() does.
1204 : */
1205 0 : int fixup_user_fault(struct mm_struct *mm,
1206 : unsigned long address, unsigned int fault_flags,
1207 : bool *unlocked)
1208 : {
1209 : struct vm_area_struct *vma;
1210 : vm_fault_t ret;
1211 :
1212 0 : address = untagged_addr(address);
1213 :
1214 0 : if (unlocked)
1215 0 : fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1216 :
1217 : retry:
1218 0 : vma = find_extend_vma(mm, address);
1219 0 : if (!vma || address < vma->vm_start)
1220 : return -EFAULT;
1221 :
1222 0 : if (!vma_permits_fault(vma, fault_flags))
1223 : return -EFAULT;
1224 :
1225 0 : if ((fault_flags & FAULT_FLAG_KILLABLE) &&
1226 0 : fatal_signal_pending(current))
1227 : return -EINTR;
1228 :
1229 0 : ret = handle_mm_fault(vma, address, fault_flags, NULL);
1230 0 : if (ret & VM_FAULT_ERROR) {
1231 0 : int err = vm_fault_to_errno(ret, 0);
1232 :
1233 0 : if (err)
1234 : return err;
1235 0 : BUG();
1236 : }
1237 :
1238 0 : if (ret & VM_FAULT_RETRY) {
1239 0 : mmap_read_lock(mm);
1240 0 : *unlocked = true;
1241 0 : fault_flags |= FAULT_FLAG_TRIED;
1242 0 : goto retry;
1243 : }
1244 :
1245 : return 0;
1246 : }
1247 : EXPORT_SYMBOL_GPL(fixup_user_fault);
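A hedged sketch of the futex-style retry loop described above (illustrative only; uaddr is a hypothetical u32 __user pointer and the surrounding locking is elided):

u32 val;
bool unlocked = false;
int ret;

for (;;) {
	pagefault_disable();
	ret = __get_user(val, uaddr);	/* atomic context: faults become -EFAULT */
	pagefault_enable();
	if (!ret)
		break;

	mmap_read_lock(current->mm);
	ret = fixup_user_fault(current->mm, (unsigned long)uaddr,
			       0 /* or FAULT_FLAG_WRITE before writing */,
			       &unlocked);
	mmap_read_unlock(current->mm);
	if (ret)
		return ret;
}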
1248 :
1249 : /*
1250 : * Please note that this function, unlike __get_user_pages will not
1251 : * return 0 for nr_pages > 0 without FOLL_NOWAIT
1252 : */
1253 : static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
1254 : unsigned long start,
1255 : unsigned long nr_pages,
1256 : struct page **pages,
1257 : struct vm_area_struct **vmas,
1258 : int *locked,
1259 : unsigned int flags)
1260 : {
1261 : long ret, pages_done;
1262 : bool lock_dropped;
1263 :
1264 0 : if (locked) {
1265 : /* if VM_FAULT_RETRY can be returned, vmas become invalid */
1266 0 : BUG_ON(vmas);
1267 : /* check caller initialized locked */
1268 0 : BUG_ON(*locked != 1);
1269 : }
1270 :
1271 0 : if (flags & FOLL_PIN)
1272 0 : mm_set_has_pinned_flag(&mm->flags);
1273 :
1274 : /*
1275 : * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1276 : * is to set FOLL_GET if the caller wants pages[] filled in (but has
1277 : * carelessly failed to specify FOLL_GET), so keep doing that, but only
1278 : * for FOLL_GET, not for the newer FOLL_PIN.
1279 : *
1280 : * FOLL_PIN always expects pages to be non-null, but no need to assert
1281 : * that here, as any failures will be obvious enough.
1282 : */
1283 0 : if (pages && !(flags & FOLL_PIN))
1284 0 : flags |= FOLL_GET;
1285 :
1286 0 : pages_done = 0;
1287 0 : lock_dropped = false;
1288 : for (;;) {
1289 0 : ret = __get_user_pages(mm, start, nr_pages, flags, pages,
1290 : vmas, locked);
1291 0 : if (!locked)
1292 : /* VM_FAULT_RETRY couldn't trigger, bypass */
1293 : return ret;
1294 :
1295 : /* VM_FAULT_RETRY cannot return errors */
1296 0 : if (!*locked) {
1297 0 : BUG_ON(ret < 0);
1298 0 : BUG_ON(ret >= nr_pages);
1299 : }
1300 :
1301 0 : if (ret > 0) {
1302 0 : nr_pages -= ret;
1303 0 : pages_done += ret;
1304 0 : if (!nr_pages)
1305 : break;
1306 : }
1307 0 : if (*locked) {
1308 : /*
1309 : * VM_FAULT_RETRY didn't trigger or it was a
1310 : * FOLL_NOWAIT.
1311 : */
1312 0 : if (!pages_done)
1313 0 : pages_done = ret;
1314 : break;
1315 : }
1316 : /*
1317 : * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1318 : * For the prefault case (!pages) we only update counts.
1319 : */
1320 0 : if (likely(pages))
1321 0 : pages += ret;
1322 0 : start += ret << PAGE_SHIFT;
1323 0 : lock_dropped = true;
1324 :
1325 : retry:
1326 : /*
1327 : * Repeat on the address that fired VM_FAULT_RETRY
1328 : * with both FAULT_FLAG_ALLOW_RETRY and
1329 : * FAULT_FLAG_TRIED. Note that GUP can be interrupted
1330 : * by fatal signals, so we need to check it before we
1331 : * start trying again otherwise it can loop forever.
1332 : */
1333 :
1334 0 : if (fatal_signal_pending(current)) {
1335 0 : if (!pages_done)
1336 0 : pages_done = -EINTR;
1337 : break;
1338 : }
1339 :
1340 0 : ret = mmap_read_lock_killable(mm);
1341 0 : if (ret) {
1342 0 : BUG_ON(ret > 0);
1343 0 : if (!pages_done)
1344 0 : pages_done = ret;
1345 : break;
1346 : }
1347 :
1348 0 : *locked = 1;
1349 0 : ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
1350 : pages, NULL, locked);
1351 0 : if (!*locked) {
1352 : /* Continue to retry until we succeeded */
1353 0 : BUG_ON(ret != 0);
1354 : goto retry;
1355 : }
1356 0 : if (ret != 1) {
1357 0 : BUG_ON(ret > 1);
1358 0 : if (!pages_done)
1359 0 : pages_done = ret;
1360 : break;
1361 : }
1362 0 : nr_pages--;
1363 0 : pages_done++;
1364 0 : if (!nr_pages)
1365 : break;
1366 0 : if (likely(pages))
1367 0 : pages++;
1368 0 : start += PAGE_SIZE;
1369 : }
1370 0 : if (lock_dropped && *locked) {
1371 : /*
1372 : * We must let the caller know we temporarily dropped the lock
1373 : * and so the critical section protected by it was lost.
1374 : */
1375 0 : mmap_read_unlock(mm);
1376 0 : *locked = 0;
1377 : }
1378 : return pages_done;
1379 : }
1380 :
1381 : /**
1382 : * populate_vma_page_range() - populate a range of pages in the vma.
1383 : * @vma: target vma
1384 : * @start: start address
1385 : * @end: end address
1386 : * @locked: whether the mmap_lock is still held
1387 : *
1388 : * This takes care of mlocking the pages too if VM_LOCKED is set.
1389 : *
1390 : * Return either number of pages pinned in the vma, or a negative error
1391 : * code on error.
1392 : *
1393 : * vma->vm_mm->mmap_lock must be held.
1394 : *
1395 : * If @locked is NULL, it may be held for read or write and will
1396 : * be unperturbed.
1397 : *
1398 : * If @locked is non-NULL, it must be held for read only and may be
1399 : * released. If it's released, *@locked will be set to 0.
1400 : */
1401 0 : long populate_vma_page_range(struct vm_area_struct *vma,
1402 : unsigned long start, unsigned long end, int *locked)
1403 : {
1404 0 : struct mm_struct *mm = vma->vm_mm;
1405 0 : unsigned long nr_pages = (end - start) / PAGE_SIZE;
1406 : int gup_flags;
1407 : long ret;
1408 :
1409 : VM_BUG_ON(!PAGE_ALIGNED(start));
1410 : VM_BUG_ON(!PAGE_ALIGNED(end));
1411 : VM_BUG_ON_VMA(start < vma->vm_start, vma);
1412 : VM_BUG_ON_VMA(end > vma->vm_end, vma);
1413 0 : mmap_assert_locked(mm);
1414 :
1415 : /*
1416 : * Rightly or wrongly, the VM_LOCKONFAULT case has never used
1417 : * faultin_page() to break COW, so it has no work to do here.
1418 : */
1419 0 : if (vma->vm_flags & VM_LOCKONFAULT)
1420 0 : return nr_pages;
1421 :
1422 0 : gup_flags = FOLL_TOUCH;
1423 : /*
1424 : * We want to touch writable mappings with a write fault in order
1425 : * to break COW, except for shared mappings because these don't COW
1426 : * and we would not want to dirty them for nothing.
1427 : */
1428 0 : if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1429 0 : gup_flags |= FOLL_WRITE;
1430 :
1431 : /*
1432 : * We want mlock to succeed for regions that have any permissions
1433 : * other than PROT_NONE.
1434 : */
1435 0 : if (vma_is_accessible(vma))
1436 0 : gup_flags |= FOLL_FORCE;
1437 :
1438 : /*
1439 : * We made sure addr is within a VMA, so the following will
1440 : * not result in a stack expansion that recurses back here.
1441 : */
1442 0 : ret = __get_user_pages(mm, start, nr_pages, gup_flags,
1443 : NULL, NULL, locked);
1444 0 : lru_add_drain();
1445 0 : return ret;
1446 : }
1447 :
1448 : /*
1449 : * faultin_vma_page_range() - populate (prefault) page tables inside the
1450 : * given VMA range readable/writable
1451 : *
1452 : * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1453 : *
1454 : * @vma: target vma
1455 : * @start: start address
1456 : * @end: end address
1457 : * @write: whether to prefault readable or writable
1458 : * @locked: whether the mmap_lock is still held
1459 : *
1460 : * Returns either number of processed pages in the vma, or a negative error
1461 : * code on error (see __get_user_pages()).
1462 : *
1463 : * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
1464 : * covered by the VMA.
1465 : *
1466 : * If @locked is NULL, it may be held for read or write and will be unperturbed.
1467 : *
1468 : * If @locked is non-NULL, it must be held for read only and may be released. If
1469 : * it's released, *@locked will be set to 0.
1470 : */
1471 0 : long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
1472 : unsigned long end, bool write, int *locked)
1473 : {
1474 0 : struct mm_struct *mm = vma->vm_mm;
1475 0 : unsigned long nr_pages = (end - start) / PAGE_SIZE;
1476 : int gup_flags;
1477 : long ret;
1478 :
1479 : VM_BUG_ON(!PAGE_ALIGNED(start));
1480 : VM_BUG_ON(!PAGE_ALIGNED(end));
1481 : VM_BUG_ON_VMA(start < vma->vm_start, vma);
1482 : VM_BUG_ON_VMA(end > vma->vm_end, vma);
1483 0 : mmap_assert_locked(mm);
1484 :
1485 : /*
1486 : * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
1487 : * the page dirty with FOLL_WRITE -- which doesn't make a
1488 : * difference with !FOLL_FORCE, because the page is writable
1489 : * in the page table.
1490 : * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
1491 : * a poisoned page.
1492 : * !FOLL_FORCE: Require proper access permissions.
1493 : */
1494 0 : gup_flags = FOLL_TOUCH | FOLL_HWPOISON;
1495 0 : if (write)
1496 0 : gup_flags |= FOLL_WRITE;
1497 :
1498 : /*
1499 : * We want to report -EINVAL instead of -EFAULT for any permission
1500 : * problems or incompatible mappings.
1501 : */
1502 0 : if (check_vma_flags(vma, gup_flags))
1503 : return -EINVAL;
1504 :
1505 0 : ret = __get_user_pages(mm, start, nr_pages, gup_flags,
1506 : NULL, NULL, locked);
1507 0 : lru_add_drain();
1508 0 : return ret;
1509 : }
1510 :
1511 : /*
1512 : * __mm_populate - populate and/or mlock pages within a range of address space.
1513 : *
1514 : * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1515 : * flags. VMAs must be already marked with the desired vm_flags, and
1516 : * mmap_lock must not be held.
1517 : */
1518 0 : int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1519 : {
1520 0 : struct mm_struct *mm = current->mm;
1521 : unsigned long end, nstart, nend;
1522 0 : struct vm_area_struct *vma = NULL;
1523 0 : int locked = 0;
1524 0 : long ret = 0;
1525 :
1526 0 : end = start + len;
1527 :
1528 0 : for (nstart = start; nstart < end; nstart = nend) {
1529 : /*
1530 : * We want to fault in pages for [nstart; end) address range.
1531 : * Find first corresponding VMA.
1532 : */
1533 0 : if (!locked) {
1534 0 : locked = 1;
1535 0 : mmap_read_lock(mm);
1536 0 : vma = find_vma(mm, nstart);
1537 0 : } else if (nstart >= vma->vm_end)
1538 0 : vma = vma->vm_next;
1539 0 : if (!vma || vma->vm_start >= end)
1540 : break;
1541 : /*
1542 : * Set [nstart; nend) to intersection of desired address
1543 : * range with the first VMA. Also, skip undesirable VMA types.
1544 : */
1545 0 : nend = min(end, vma->vm_end);
1546 0 : if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1547 0 : continue;
1548 0 : if (nstart < vma->vm_start)
1549 0 : nstart = vma->vm_start;
1550 : /*
1551 : * Now fault in a range of pages. populate_vma_page_range()
1552 : * double checks the vma flags, so that it won't mlock pages
1553 : * if the vma was already munlocked.
1554 : */
1555 0 : ret = populate_vma_page_range(vma, nstart, nend, &locked);
1556 0 : if (ret < 0) {
1557 0 : if (ignore_errors) {
1558 0 : ret = 0;
1559 0 : continue; /* continue at next VMA */
1560 : }
1561 : break;
1562 : }
1563 0 : nend = nstart + ret * PAGE_SIZE;
1564 0 : ret = 0;
1565 : }
1566 0 : if (locked)
1567 : mmap_read_unlock(mm);
1568 0 : return ret; /* 0 or negative error code */
1569 : }
1570 : #else /* CONFIG_MMU */
1571 : static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
1572 : unsigned long nr_pages, struct page **pages,
1573 : struct vm_area_struct **vmas, int *locked,
1574 : unsigned int foll_flags)
1575 : {
1576 : struct vm_area_struct *vma;
1577 : unsigned long vm_flags;
1578 : long i;
1579 :
1580 : /* calculate required read or write permissions.
1581 : * If FOLL_FORCE is set, we only require the "MAY" flags.
1582 : */
1583 : vm_flags = (foll_flags & FOLL_WRITE) ?
1584 : (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1585 : vm_flags &= (foll_flags & FOLL_FORCE) ?
1586 : (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1587 :
1588 : for (i = 0; i < nr_pages; i++) {
1589 : vma = find_vma(mm, start);
1590 : if (!vma)
1591 : goto finish_or_fault;
1592 :
1593 : /* protect what we can, including chardevs */
1594 : if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1595 : !(vm_flags & vma->vm_flags))
1596 : goto finish_or_fault;
1597 :
1598 : if (pages) {
1599 : pages[i] = virt_to_page(start);
1600 : if (pages[i])
1601 : get_page(pages[i]);
1602 : }
1603 : if (vmas)
1604 : vmas[i] = vma;
1605 : start = (start + PAGE_SIZE) & PAGE_MASK;
1606 : }
1607 :
1608 : return i;
1609 :
1610 : finish_or_fault:
1611 : return i ? : -EFAULT;
1612 : }
1613 : #endif /* !CONFIG_MMU */
1614 :
1615 : /**
1616 : * fault_in_writeable - fault in userspace address range for writing
1617 : * @uaddr: start of address range
1618 : * @size: size of address range
1619 : *
1620 : * Returns the number of bytes not faulted in (like copy_to_user() and
1621 : * copy_from_user()).
1622 : */
1623 0 : size_t fault_in_writeable(char __user *uaddr, size_t size)
1624 : {
1625 0 : char __user *start = uaddr, *end;
1626 :
1627 0 : if (unlikely(size == 0))
1628 : return 0;
1629 0 : if (!user_write_access_begin(uaddr, size))
1630 : return size;
1631 0 : if (!PAGE_ALIGNED(uaddr)) {
1632 0 : unsafe_put_user(0, uaddr, out);
1633 0 : uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
1634 : }
1635 0 : end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
1636 0 : if (unlikely(end < start))
1637 0 : end = NULL;
1638 0 : while (uaddr != end) {
1639 0 : unsafe_put_user(0, uaddr, out);
1640 0 : uaddr += PAGE_SIZE;
1641 : }
1642 :
1643 : out:
1644 : user_write_access_end();
1645 0 : if (size > uaddr - start)
1646 0 : return size - (uaddr - start);
1647 : return 0;
1648 : }
1649 : EXPORT_SYMBOL(fault_in_writeable);
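A hedged sketch of how fault_in_writeable() is typically paired with an atomic user copy (hypothetical helper; real users such as the filesystem direct-I/O paths drop their locks before faulting in and retrying):

static ssize_t copy_out_with_faultin(char __user *ubuf, const char *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		size_t left;

		/* attempt the copy with page faults disabled (e.g. under fs locks) */
		pagefault_disable();
		left = copy_to_user(ubuf + done, buf + done, len - done);
		pagefault_enable();
		done += (len - done) - left;
		if (!left)
			break;

		/* drop the locks here in real code, then fault in and retry */
		if (fault_in_writeable(ubuf + done, left) == left)
			return -EFAULT;	/* nothing could be faulted in */
	}
	return done;
}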
1650 :
1651 : /*
1652 : * fault_in_safe_writeable - fault in an address range for writing
1653 : * @uaddr: start of address range
1654 : * @size: length of address range
1655 : *
1656 : * Faults in an address range for writing. This is primarily useful when we
1657 : * already know that some or all of the pages in the address range aren't in
1658 : * memory.
1659 : *
1660 : * Unlike fault_in_writeable(), this function is non-destructive.
1661 : *
1662 : * Note that we don't pin or otherwise hold the pages referenced that we fault
1663 : * in. There's no guarantee that they'll stay in memory for any duration of
1664 : * time.
1665 : *
1666 : * Returns the number of bytes not faulted in, like copy_to_user() and
1667 : * copy_from_user().
1668 : */
1669 0 : size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
1670 : {
1671 0 : unsigned long start = (unsigned long)uaddr, end;
1672 0 : struct mm_struct *mm = current->mm;
1673 0 : bool unlocked = false;
1674 :
1675 0 : if (unlikely(size == 0))
1676 : return 0;
1677 0 : end = PAGE_ALIGN(start + size);
1678 0 : if (end < start)
1679 0 : end = 0;
1680 :
1681 : mmap_read_lock(mm);
1682 : do {
1683 0 : if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
1684 : break;
1685 0 : start = (start + PAGE_SIZE) & PAGE_MASK;
1686 0 : } while (start != end);
1687 0 : mmap_read_unlock(mm);
1688 :
1689 0 : if (size > (unsigned long)uaddr - start)
1690 0 : return size - ((unsigned long)uaddr - start);
1691 : return 0;
1692 : }
1693 : EXPORT_SYMBOL(fault_in_safe_writeable);
1694 :
1695 : /**
1696 : * fault_in_readable - fault in userspace address range for reading
1697 : * @uaddr: start of user address range
1698 : * @size: size of user address range
1699 : *
1700 : * Returns the number of bytes not faulted in (like copy_to_user() and
1701 : * copy_from_user()).
1702 : */
1703 0 : size_t fault_in_readable(const char __user *uaddr, size_t size)
1704 : {
1705 0 : const char __user *start = uaddr, *end;
1706 : volatile char c;
1707 :
1708 0 : if (unlikely(size == 0))
1709 : return 0;
1710 0 : if (!user_read_access_begin(uaddr, size))
1711 : return size;
1712 0 : if (!PAGE_ALIGNED(uaddr)) {
1713 0 : unsafe_get_user(c, uaddr, out);
1714 0 : uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
1715 : }
1716 0 : end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
1717 0 : if (unlikely(end < start))
1718 0 : end = NULL;
1719 0 : while (uaddr != end) {
1720 0 : unsafe_get_user(c, uaddr, out);
1721 0 : uaddr += PAGE_SIZE;
1722 : }
1723 :
1724 : out:
1725 : user_read_access_end();
1726 0 : (void)c;
1727 0 : if (size > uaddr - start)
1728 0 : return size - (uaddr - start);
1729 : return 0;
1730 : }
1731 : EXPORT_SYMBOL(fault_in_readable);
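/*
 * [Illustrative example -- not part of gup.c]
 * The read-side counterpart of the fault_in_writeable() pattern shown
 * earlier: pre-fault the source buffer and retry a no-fault copy from user
 * space. A real caller would normally bound the number of retries.
 */
static int example_copy_in(void *kbuf, const char __user *ubuf, size_t len)
{
	while (copy_from_user_nofault(kbuf, ubuf, len)) {
		if (fault_in_readable(ubuf, len))
			return -EFAULT;
	}
	return 0;
}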
1732 :
1733 : /**
1734 : * get_dump_page() - pin user page in memory while writing it to core dump
1735 : * @addr: user address
1736 : *
1737 : * Returns struct page pointer of user page pinned for dump,
1738 : * to be freed afterwards by put_page().
1739 : *
1740 : * Returns NULL on any kind of failure - a hole must then be inserted into
1741 : * the corefile, to preserve alignment with its headers; and also returns
1742 : * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1743 : * allowing a hole to be left in the corefile to save disk space.
1744 : *
1745 : * Called without mmap_lock (takes and releases the mmap_lock by itself).
1746 : */
1747 : #ifdef CONFIG_ELF_CORE
1748 0 : struct page *get_dump_page(unsigned long addr)
1749 : {
1750 0 : struct mm_struct *mm = current->mm;
1751 : struct page *page;
1752 0 : int locked = 1;
1753 : int ret;
1754 :
1755 0 : if (mmap_read_lock_killable(mm))
1756 : return NULL;
1757 0 : ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
1758 : FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1759 0 : if (locked)
1760 : mmap_read_unlock(mm);
1761 0 : return (ret == 1) ? page : NULL;
1762 : }
1763 : #endif /* CONFIG_ELF_CORE */
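/*
 * [Illustrative example -- not part of gup.c]
 * A rough sketch of how a core dump writer consumes get_dump_page(): every
 * NULL return becomes a hole in the core file. dump_write() and
 * dump_skip_hole() are hypothetical stand-ins for the dump output helpers,
 * and kmap_local_page() needs <linux/highmem.h>.
 */
static int example_dump_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);
		int err;

		if (!page) {
			err = dump_skip_hole(PAGE_SIZE);	/* hypothetical */
		} else {
			void *kaddr = kmap_local_page(page);

			err = dump_write(kaddr, PAGE_SIZE);	/* hypothetical */
			kunmap_local(kaddr);
			put_page(page);
		}
		if (err)
			return err;
	}
	return 0;
}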
1764 :
1765 : #ifdef CONFIG_MIGRATION
1766 : /*
1767 :  * Check whether all pages are pinnable; if so, return the number of pages.
1768 :  * If some pages are not pinnable, migrate them and unpin all of the pages.
1769 :  * Return zero if pages were migrated, or if some pages were not successfully
1770 :  * isolated. Return a negative error if migration fails.
1771 : */
1772 0 : static long check_and_migrate_movable_pages(unsigned long nr_pages,
1773 : struct page **pages,
1774 : unsigned int gup_flags)
1775 : {
1776 0 : unsigned long isolation_error_count = 0, i;
1777 0 : struct folio *prev_folio = NULL;
1778 0 : LIST_HEAD(movable_page_list);
1779 0 : bool drain_allow = true;
1780 0 : int ret = 0;
1781 :
1782 0 : for (i = 0; i < nr_pages; i++) {
1783 0 : struct folio *folio = page_folio(pages[i]);
1784 :
1785 0 : if (folio == prev_folio)
1786 0 : continue;
1787 0 : prev_folio = folio;
1788 :
1789 0 : if (folio_is_pinnable(folio))
1790 0 : continue;
1791 :
1792 : /*
1793 : * Try to move out any movable page before pinning the range.
1794 : */
1795 0 : if (folio_test_hugetlb(folio)) {
1796 : if (!isolate_huge_page(&folio->page,
1797 : &movable_page_list))
1798 : isolation_error_count++;
1799 : continue;
1800 : }
1801 :
1802 0 : if (!folio_test_lru(folio) && drain_allow) {
1803 0 : lru_add_drain_all();
1804 0 : drain_allow = false;
1805 : }
1806 :
1807 0 : if (folio_isolate_lru(folio)) {
1808 0 : isolation_error_count++;
1809 0 : continue;
1810 : }
1811 0 : list_add_tail(&folio->lru, &movable_page_list);
1812 0 : node_stat_mod_folio(folio,
1813 0 : NR_ISOLATED_ANON + folio_is_file_lru(folio),
1814 : folio_nr_pages(folio));
1815 : }
1816 :
1817 0 : if (!list_empty(&movable_page_list) || isolation_error_count)
1818 : goto unpin_pages;
1819 :
1820 : /*
1821 :  * If the list is empty and there were no isolation errors, all pages
1822 :  * are in the correct zone.
1823 : */
1824 0 : return nr_pages;
1825 :
1826 : unpin_pages:
1827 0 : if (gup_flags & FOLL_PIN) {
1828 0 : unpin_user_pages(pages, nr_pages);
1829 : } else {
1830 0 : for (i = 0; i < nr_pages; i++)
1831 0 : put_page(pages[i]);
1832 : }
1833 :
1834 0 : if (!list_empty(&movable_page_list)) {
1835 0 : struct migration_target_control mtc = {
1836 : .nid = NUMA_NO_NODE,
1837 : .gfp_mask = GFP_USER | __GFP_NOWARN,
1838 : };
1839 :
1840 0 : ret = migrate_pages(&movable_page_list, alloc_migration_target,
1841 : NULL, (unsigned long)&mtc, MIGRATE_SYNC,
1842 : MR_LONGTERM_PIN, NULL);
1843 0 : if (ret > 0) /* number of pages not migrated */
1844 0 : ret = -ENOMEM;
1845 : }
1846 :
1847 0 : if (ret && !list_empty(&movable_page_list))
1848 0 : putback_movable_pages(&movable_page_list);
1849 0 : return ret;
1850 : }
1851 : #else
1852 : static long check_and_migrate_movable_pages(unsigned long nr_pages,
1853 : struct page **pages,
1854 : unsigned int gup_flags)
1855 : {
1856 : return nr_pages;
1857 : }
1858 : #endif /* CONFIG_MIGRATION */
1859 :
1860 : /*
1861 : * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
1862 : * allows us to process the FOLL_LONGTERM flag.
1863 : */
1864 0 : static long __gup_longterm_locked(struct mm_struct *mm,
1865 : unsigned long start,
1866 : unsigned long nr_pages,
1867 : struct page **pages,
1868 : struct vm_area_struct **vmas,
1869 : unsigned int gup_flags)
1870 : {
1871 : unsigned int flags;
1872 : long rc;
1873 :
1874 0 : if (!(gup_flags & FOLL_LONGTERM))
1875 0 : return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1876 : NULL, gup_flags);
1877 0 : flags = memalloc_pin_save();
1878 : do {
1879 0 : rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1880 : NULL, gup_flags);
1881 0 : if (rc <= 0)
1882 : break;
1883 0 : rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
1884 0 : } while (!rc);
1885 0 : memalloc_pin_restore(flags);
1886 :
1887 0 : return rc;
1888 : }
1889 :
1890 0 : static bool is_valid_gup_flags(unsigned int gup_flags)
1891 : {
1892 : /*
1893 : * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1894 : * never directly by the caller, so enforce that with an assertion:
1895 : */
1896 0 : if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1897 : return false;
1898 : /*
1899 : * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
1900 :  * that is, FOLL_LONGTERM is a more specific, more restrictive case of
1901 : * FOLL_PIN.
1902 : */
1903 0 : if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1904 : return false;
1905 :
1906 0 : return true;
1907 : }
1908 :
1909 : #ifdef CONFIG_MMU
1910 0 : static long __get_user_pages_remote(struct mm_struct *mm,
1911 : unsigned long start, unsigned long nr_pages,
1912 : unsigned int gup_flags, struct page **pages,
1913 : struct vm_area_struct **vmas, int *locked)
1914 : {
1915 : /*
1916 : * Parts of FOLL_LONGTERM behavior are incompatible with
1917 : * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1918 : * vmas. However, this only comes up if locked is set, and there are
1919 : * callers that do request FOLL_LONGTERM, but do not set locked. So,
1920 : * allow what we can.
1921 : */
1922 0 : if (gup_flags & FOLL_LONGTERM) {
1923 0 : if (WARN_ON_ONCE(locked))
1924 : return -EINVAL;
1925 : /*
1926 : * This will check the vmas (even if our vmas arg is NULL)
1927 : * and return -ENOTSUPP if DAX isn't allowed in this case:
1928 : */
1929 0 : return __gup_longterm_locked(mm, start, nr_pages, pages,
1930 : vmas, gup_flags | FOLL_TOUCH |
1931 : FOLL_REMOTE);
1932 : }
1933 :
1934 0 : return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1935 : locked,
1936 : gup_flags | FOLL_TOUCH | FOLL_REMOTE);
1937 : }
1938 :
1939 : /**
1940 : * get_user_pages_remote() - pin user pages in memory
1941 : * @mm: mm_struct of target mm
1942 : * @start: starting user address
1943 : * @nr_pages: number of pages from start to pin
1944 : * @gup_flags: flags modifying lookup behaviour
1945 : * @pages: array that receives pointers to the pages pinned.
1946 : * Should be at least nr_pages long. Or NULL, if caller
1947 : * only intends to ensure the pages are faulted in.
1948 : * @vmas: array of pointers to vmas corresponding to each page.
1949 : * Or NULL if the caller does not require them.
1950 : * @locked: pointer to lock flag indicating whether lock is held and
1951 : * subsequently whether VM_FAULT_RETRY functionality can be
1952 : * utilised. Lock must initially be held.
1953 : *
1954 : * Returns either number of pages pinned (which may be less than the
1955 : * number requested), or an error. Details about the return value:
1956 : *
1957 : * -- If nr_pages is 0, returns 0.
1958 : * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1959 : * -- If nr_pages is >0, and some pages were pinned, returns the number of
1960 : * pages pinned. Again, this may be less than nr_pages.
1961 : *
1962 : * The caller is responsible for releasing returned @pages, via put_page().
1963 : *
1964 : * @vmas are valid only as long as mmap_lock is held.
1965 : *
1966 : * Must be called with mmap_lock held for read or write.
1967 : *
1968 : * get_user_pages_remote walks a process's page tables and takes a reference
1969 : * to each struct page that each user address corresponds to at a given
1970 : * instant. That is, it takes the page that would be accessed if a user
1971 : * thread accesses the given user virtual address at that instant.
1972 : *
1973 : * This does not guarantee that the page exists in the user mappings when
1974 : * get_user_pages_remote returns, and there may even be a completely different
1975 : * page there in some cases (eg. if mmapped pagecache has been invalidated
1976 :  * page there in some cases (e.g. if mmapped pagecache has been invalidated
1977 :  * and subsequently re-faulted). However, it does guarantee that the page
1978 : * contains data that was valid *at some point in time*. Typically, an IO
1979 : * or similar operation cannot guarantee anything stronger anyway because
1980 : * locks can't be held over the syscall boundary.
1981 : *
1982 : * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
1983 : * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
1984 : * be called after the page is finished with, and before put_page is called.
1985 : *
1986 : * get_user_pages_remote is typically used for fewer-copy IO operations,
1987 : * to get a handle on the memory by some means other than accesses
1988 : * via the user virtual addresses. The pages may be submitted for
1989 : * DMA to devices or accessed via their kernel linear mapping (via the
1990 : * kmap APIs). Care should be taken to use the correct cache flushing APIs.
1991 : *
1992 : * See also get_user_pages_fast, for performance critical applications.
1993 : *
1994 : * get_user_pages_remote should be phased out in favor of
1995 : * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
1996 : * should use get_user_pages_remote because it cannot pass
1997 : * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
1998 : */
1999 0 : long get_user_pages_remote(struct mm_struct *mm,
2000 : unsigned long start, unsigned long nr_pages,
2001 : unsigned int gup_flags, struct page **pages,
2002 : struct vm_area_struct **vmas, int *locked)
2003 : {
2004 0 : if (!is_valid_gup_flags(gup_flags))
2005 : return -EINVAL;
2006 :
2007 0 : return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
2008 : pages, vmas, locked);
2009 : }
2010 : EXPORT_SYMBOL(get_user_pages_remote);
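/*
 * [Illustrative example -- not part of gup.c]
 * A minimal sketch of reading one page of another process's memory with
 * get_user_pages_remote(). The target mm is assumed to have been obtained
 * (and pinned) by the caller, e.g. via get_task_mm(); kmap_local_page()
 * needs <linux/highmem.h>.
 */
static long example_peek_remote(struct mm_struct *mm, unsigned long addr,
				void *buf, size_t len)
{
	struct page *page;
	void *kaddr;
	long got;

	len = min_t(size_t, len, PAGE_SIZE - offset_in_page(addr));

	mmap_read_lock(mm);
	got = get_user_pages_remote(mm, addr & PAGE_MASK, 1, 0,
				    &page, NULL, NULL);
	mmap_read_unlock(mm);
	if (got != 1)
		return got < 0 ? got : -EFAULT;

	kaddr = kmap_local_page(page);
	memcpy(buf, kaddr + offset_in_page(addr), len);
	kunmap_local(kaddr);

	put_page(page);		/* drop the FOLL_GET reference */
	return len;
}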
2011 :
2012 : #else /* CONFIG_MMU */
2013 : long get_user_pages_remote(struct mm_struct *mm,
2014 : unsigned long start, unsigned long nr_pages,
2015 : unsigned int gup_flags, struct page **pages,
2016 : struct vm_area_struct **vmas, int *locked)
2017 : {
2018 : return 0;
2019 : }
2020 :
2021 : static long __get_user_pages_remote(struct mm_struct *mm,
2022 : unsigned long start, unsigned long nr_pages,
2023 : unsigned int gup_flags, struct page **pages,
2024 : struct vm_area_struct **vmas, int *locked)
2025 : {
2026 : return 0;
2027 : }
2028 : #endif /* !CONFIG_MMU */
2029 :
2030 : /**
2031 : * get_user_pages() - pin user pages in memory
2032 : * @start: starting user address
2033 : * @nr_pages: number of pages from start to pin
2034 : * @gup_flags: flags modifying lookup behaviour
2035 : * @pages: array that receives pointers to the pages pinned.
2036 : * Should be at least nr_pages long. Or NULL, if caller
2037 : * only intends to ensure the pages are faulted in.
2038 : * @vmas: array of pointers to vmas corresponding to each page.
2039 : * Or NULL if the caller does not require them.
2040 : *
2041 : * This is the same as get_user_pages_remote(), just with a less-flexible
2042 : * calling convention where we assume that the mm being operated on belongs to
2043 : * the current task, and doesn't allow passing of a locked parameter. We also
2044 : * obviously don't pass FOLL_REMOTE in here.
2045 : */
2046 0 : long get_user_pages(unsigned long start, unsigned long nr_pages,
2047 : unsigned int gup_flags, struct page **pages,
2048 : struct vm_area_struct **vmas)
2049 : {
2050 0 : if (!is_valid_gup_flags(gup_flags))
2051 : return -EINVAL;
2052 :
2053 0 : return __gup_longterm_locked(current->mm, start, nr_pages,
2054 : pages, vmas, gup_flags | FOLL_TOUCH);
2055 : }
2056 : EXPORT_SYMBOL(get_user_pages);
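/*
 * [Illustrative example -- not part of gup.c]
 * Sketch of a short-lived FOLL_GET pin on the current task's memory: the
 * caller takes mmap_lock itself, and every page obtained here is released
 * with put_page() (after set_page_dirty_lock() if it was written to).
 */
static long example_get_current_pages(unsigned long uaddr, unsigned long nr,
				      struct page **pages)
{
	long pinned, i;

	mmap_read_lock(current->mm);
	pinned = get_user_pages(uaddr, nr, FOLL_WRITE, pages, NULL);
	mmap_read_unlock(current->mm);

	if (pinned < 0)
		return pinned;

	/* ... access pages[0..pinned-1] here ... */

	for (i = 0; i < pinned; i++) {
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	return pinned;
}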
2057 :
2058 : /*
2059 : * get_user_pages_unlocked() is suitable to replace the form:
2060 : *
2061 : * mmap_read_lock(mm);
2062 :  *      get_user_pages(..., pages, NULL);
2063 : * mmap_read_unlock(mm);
2064 : *
2065 : * with:
2066 : *
2067 :  *      get_user_pages_unlocked(..., pages, gup_flags);
2068 : *
2069 : * It is functionally equivalent to get_user_pages_fast so
2070 : * get_user_pages_fast should be used instead if specific gup_flags
2071 : * (e.g. FOLL_FORCE) are not required.
2072 : */
2073 0 : long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2074 : struct page **pages, unsigned int gup_flags)
2075 : {
2076 0 : struct mm_struct *mm = current->mm;
2077 0 : int locked = 1;
2078 : long ret;
2079 :
2080 : /*
2081 : * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2082 : * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2083 : * vmas. As there are no users of this flag in this call we simply
2084 : * disallow this option for now.
2085 : */
2086 0 : if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2087 : return -EINVAL;
2088 :
2089 0 : mmap_read_lock(mm);
2090 0 : ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
2091 : &locked, gup_flags | FOLL_TOUCH);
2092 0 : if (locked)
2093 : mmap_read_unlock(mm);
2094 : return ret;
2095 : }
2096 : EXPORT_SYMBOL(get_user_pages_unlocked);
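/*
 * [Illustrative example -- not part of gup.c]
 * The replacement form described above, in one place: the helper takes and
 * drops mmap_lock itself, so the caller only pins and later releases the
 * pages.
 */
static long example_get_pages_unlocked(unsigned long uaddr, unsigned long nr,
					struct page **pages)
{
	long pinned, i;

	pinned = get_user_pages_unlocked(uaddr, nr, pages, FOLL_WRITE);
	if (pinned < 0)
		return pinned;

	/* ... use the pages ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	return pinned;
}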
2097 :
2098 : /*
2099 : * Fast GUP
2100 : *
2101 : * get_user_pages_fast attempts to pin user pages by walking the page
2102 : * tables directly and avoids taking locks. Thus the walker needs to be
2103 : * protected from page table pages being freed from under it, and should
2104 : * block any THP splits.
2105 : *
2106 : * One way to achieve this is to have the walker disable interrupts, and
2107 : * rely on IPIs from the TLB flushing code blocking before the page table
2108 : * pages are freed. This is unsuitable for architectures that do not need
2109 : * to broadcast an IPI when invalidating TLBs.
2110 : *
2111 :  * Another way to achieve this is to batch up the pages containing page tables
2112 :  * belonging to more than one mm_user, then rcu_sched a callback to free those
2113 : * pages. Disabling interrupts will allow the fast_gup walker to both block
2114 : * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
2115 : * (which is a relatively rare event). The code below adopts this strategy.
2116 : *
2117 : * Before activating this code, please be aware that the following assumptions
2118 : * are currently made:
2119 : *
2120 : * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
2121 :  *  free pages containing page tables, or TLB flushing requires IPI broadcast.
2122 : *
2123 : * *) ptes can be read atomically by the architecture.
2124 : *
2125 : * *) access_ok is sufficient to validate userspace address ranges.
2126 : *
2127 : * The last two assumptions can be relaxed by the addition of helper functions.
2128 : *
2129 : * This code is based heavily on the PowerPC implementation by Nick Piggin.
2130 : */
2131 : #ifdef CONFIG_HAVE_FAST_GUP
2132 :
2133 : static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
2134 : unsigned int flags,
2135 : struct page **pages)
2136 : {
2137 : while ((*nr) - nr_start) {
2138 : struct page *page = pages[--(*nr)];
2139 :
2140 : ClearPageReferenced(page);
2141 : if (flags & FOLL_PIN)
2142 : unpin_user_page(page);
2143 : else
2144 : put_page(page);
2145 : }
2146 : }
2147 :
2148 : #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2149 : static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2150 : unsigned int flags, struct page **pages, int *nr)
2151 : {
2152 : struct dev_pagemap *pgmap = NULL;
2153 : int nr_start = *nr, ret = 0;
2154 : pte_t *ptep, *ptem;
2155 :
2156 : ptem = ptep = pte_offset_map(&pmd, addr);
2157 : do {
2158 : pte_t pte = ptep_get_lockless(ptep);
2159 : struct page *page;
2160 : struct folio *folio;
2161 :
2162 : /*
2163 : * Similar to the PMD case below, NUMA hinting must take slow
2164 : * path using the pte_protnone check.
2165 : */
2166 : if (pte_protnone(pte))
2167 : goto pte_unmap;
2168 :
2169 : if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2170 : goto pte_unmap;
2171 :
2172 : if (pte_devmap(pte)) {
2173 : if (unlikely(flags & FOLL_LONGTERM))
2174 : goto pte_unmap;
2175 :
2176 : pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2177 : if (unlikely(!pgmap)) {
2178 : undo_dev_pagemap(nr, nr_start, flags, pages);
2179 : goto pte_unmap;
2180 : }
2181 : } else if (pte_special(pte))
2182 : goto pte_unmap;
2183 :
2184 : VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2185 : page = pte_page(pte);
2186 :
2187 : folio = try_grab_folio(page, 1, flags);
2188 : if (!folio)
2189 : goto pte_unmap;
2190 :
2191 : if (unlikely(page_is_secretmem(page))) {
2192 : gup_put_folio(folio, 1, flags);
2193 : goto pte_unmap;
2194 : }
2195 :
2196 : if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2197 : gup_put_folio(folio, 1, flags);
2198 : goto pte_unmap;
2199 : }
2200 :
2201 : /*
2202 : * We need to make the page accessible if and only if we are
2203 : * going to access its content (the FOLL_PIN case). Please
2204 : * see Documentation/core-api/pin_user_pages.rst for
2205 : * details.
2206 : */
2207 : if (flags & FOLL_PIN) {
2208 : ret = arch_make_page_accessible(page);
2209 : if (ret) {
2210 : gup_put_folio(folio, 1, flags);
2211 : goto pte_unmap;
2212 : }
2213 : }
2214 : folio_set_referenced(folio);
2215 : pages[*nr] = page;
2216 : (*nr)++;
2217 : } while (ptep++, addr += PAGE_SIZE, addr != end);
2218 :
2219 : ret = 1;
2220 :
2221 : pte_unmap:
2222 : if (pgmap)
2223 : put_dev_pagemap(pgmap);
2224 : pte_unmap(ptem);
2225 : return ret;
2226 : }
2227 : #else
2228 :
2229 : /*
2230 : * If we can't determine whether or not a pte is special, then fail immediately
2231 : * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2232 : * to be special.
2233 : *
2234 : * For a futex to be placed on a THP tail page, get_futex_key requires a
2235 : * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2236 : * useful to have gup_huge_pmd even if we can't operate on ptes.
2237 : */
2238 : static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2239 : unsigned int flags, struct page **pages, int *nr)
2240 : {
2241 : return 0;
2242 : }
2243 : #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2244 :
2245 : #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2246 : static int __gup_device_huge(unsigned long pfn, unsigned long addr,
2247 : unsigned long end, unsigned int flags,
2248 : struct page **pages, int *nr)
2249 : {
2250 : int nr_start = *nr;
2251 : struct dev_pagemap *pgmap = NULL;
2252 :
2253 : do {
2254 : struct page *page = pfn_to_page(pfn);
2255 :
2256 : pgmap = get_dev_pagemap(pfn, pgmap);
2257 : if (unlikely(!pgmap)) {
2258 : undo_dev_pagemap(nr, nr_start, flags, pages);
2259 : break;
2260 : }
2261 : SetPageReferenced(page);
2262 : pages[*nr] = page;
2263 : if (unlikely(!try_grab_page(page, flags))) {
2264 : undo_dev_pagemap(nr, nr_start, flags, pages);
2265 : break;
2266 : }
2267 : (*nr)++;
2268 : pfn++;
2269 : } while (addr += PAGE_SIZE, addr != end);
2270 :
2271 : put_dev_pagemap(pgmap);
2272 : return addr == end;
2273 : }
2274 :
2275 : static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2276 : unsigned long end, unsigned int flags,
2277 : struct page **pages, int *nr)
2278 : {
2279 : unsigned long fault_pfn;
2280 : int nr_start = *nr;
2281 :
2282 : fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2283 : if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2284 : return 0;
2285 :
2286 : if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2287 : undo_dev_pagemap(nr, nr_start, flags, pages);
2288 : return 0;
2289 : }
2290 : return 1;
2291 : }
2292 :
2293 : static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2294 : unsigned long end, unsigned int flags,
2295 : struct page **pages, int *nr)
2296 : {
2297 : unsigned long fault_pfn;
2298 : int nr_start = *nr;
2299 :
2300 : fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2301 : if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2302 : return 0;
2303 :
2304 : if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2305 : undo_dev_pagemap(nr, nr_start, flags, pages);
2306 : return 0;
2307 : }
2308 : return 1;
2309 : }
2310 : #else
2311 : static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2312 : unsigned long end, unsigned int flags,
2313 : struct page **pages, int *nr)
2314 : {
2315 : BUILD_BUG();
2316 : return 0;
2317 : }
2318 :
2319 : static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
2320 : unsigned long end, unsigned int flags,
2321 : struct page **pages, int *nr)
2322 : {
2323 : BUILD_BUG();
2324 : return 0;
2325 : }
2326 : #endif
2327 :
2328 : static int record_subpages(struct page *page, unsigned long addr,
2329 : unsigned long end, struct page **pages)
2330 : {
2331 : int nr;
2332 :
2333 : for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
2334 : pages[nr] = nth_page(page, nr);
2335 :
2336 : return nr;
2337 : }
2338 :
2339 : #ifdef CONFIG_ARCH_HAS_HUGEPD
2340 : static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2341 : unsigned long sz)
2342 : {
2343 : unsigned long __boundary = (addr + sz) & ~(sz-1);
2344 : return (__boundary - 1 < end - 1) ? __boundary : end;
2345 : }
2346 :
2347 : static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
2348 : unsigned long end, unsigned int flags,
2349 : struct page **pages, int *nr)
2350 : {
2351 : unsigned long pte_end;
2352 : struct page *page;
2353 : struct folio *folio;
2354 : pte_t pte;
2355 : int refs;
2356 :
2357 : pte_end = (addr + sz) & ~(sz-1);
2358 : if (pte_end < end)
2359 : end = pte_end;
2360 :
2361 : pte = huge_ptep_get(ptep);
2362 :
2363 : if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2364 : return 0;
2365 :
2366 : /* hugepages are never "special" */
2367 : VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2368 :
2369 : page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
2370 : refs = record_subpages(page, addr, end, pages + *nr);
2371 :
2372 : folio = try_grab_folio(page, refs, flags);
2373 : if (!folio)
2374 : return 0;
2375 :
2376 : if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2377 : gup_put_folio(folio, refs, flags);
2378 : return 0;
2379 : }
2380 :
2381 : *nr += refs;
2382 : folio_set_referenced(folio);
2383 : return 1;
2384 : }
2385 :
2386 : static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2387 : unsigned int pdshift, unsigned long end, unsigned int flags,
2388 : struct page **pages, int *nr)
2389 : {
2390 : pte_t *ptep;
2391 : unsigned long sz = 1UL << hugepd_shift(hugepd);
2392 : unsigned long next;
2393 :
2394 : ptep = hugepte_offset(hugepd, addr, pdshift);
2395 : do {
2396 : next = hugepte_addr_end(addr, end, sz);
2397 : if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2398 : return 0;
2399 : } while (ptep++, addr = next, addr != end);
2400 :
2401 : return 1;
2402 : }
2403 : #else
2404 : static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2405 : unsigned int pdshift, unsigned long end, unsigned int flags,
2406 : struct page **pages, int *nr)
2407 : {
2408 : return 0;
2409 : }
2410 : #endif /* CONFIG_ARCH_HAS_HUGEPD */
2411 :
2412 : static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2413 : unsigned long end, unsigned int flags,
2414 : struct page **pages, int *nr)
2415 : {
2416 : struct page *page;
2417 : struct folio *folio;
2418 : int refs;
2419 :
2420 : if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2421 : return 0;
2422 :
2423 : if (pmd_devmap(orig)) {
2424 : if (unlikely(flags & FOLL_LONGTERM))
2425 : return 0;
2426 : return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2427 : pages, nr);
2428 : }
2429 :
2430 : page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT);
2431 : refs = record_subpages(page, addr, end, pages + *nr);
2432 :
2433 : folio = try_grab_folio(page, refs, flags);
2434 : if (!folio)
2435 : return 0;
2436 :
2437 : if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2438 : gup_put_folio(folio, refs, flags);
2439 : return 0;
2440 : }
2441 :
2442 : *nr += refs;
2443 : folio_set_referenced(folio);
2444 : return 1;
2445 : }
2446 :
2447 : static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2448 : unsigned long end, unsigned int flags,
2449 : struct page **pages, int *nr)
2450 : {
2451 : struct page *page;
2452 : struct folio *folio;
2453 : int refs;
2454 :
2455 : if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2456 : return 0;
2457 :
2458 : if (pud_devmap(orig)) {
2459 : if (unlikely(flags & FOLL_LONGTERM))
2460 : return 0;
2461 : return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2462 : pages, nr);
2463 : }
2464 :
2465 : page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT);
2466 : refs = record_subpages(page, addr, end, pages + *nr);
2467 :
2468 : folio = try_grab_folio(page, refs, flags);
2469 : if (!folio)
2470 : return 0;
2471 :
2472 : if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2473 : gup_put_folio(folio, refs, flags);
2474 : return 0;
2475 : }
2476 :
2477 : *nr += refs;
2478 : folio_set_referenced(folio);
2479 : return 1;
2480 : }
2481 :
2482 : static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2483 : unsigned long end, unsigned int flags,
2484 : struct page **pages, int *nr)
2485 : {
2486 : int refs;
2487 : struct page *page;
2488 : struct folio *folio;
2489 :
2490 : if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
2491 : return 0;
2492 :
2493 : BUILD_BUG_ON(pgd_devmap(orig));
2494 :
2495 : page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2496 : refs = record_subpages(page, addr, end, pages + *nr);
2497 :
2498 : folio = try_grab_folio(page, refs, flags);
2499 : if (!folio)
2500 : return 0;
2501 :
2502 : if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2503 : gup_put_folio(folio, refs, flags);
2504 : return 0;
2505 : }
2506 :
2507 : *nr += refs;
2508 : folio_set_referenced(folio);
2509 : return 1;
2510 : }
2511 :
2512 : static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
2513 : unsigned int flags, struct page **pages, int *nr)
2514 : {
2515 : unsigned long next;
2516 : pmd_t *pmdp;
2517 :
2518 : pmdp = pmd_offset_lockless(pudp, pud, addr);
2519 : do {
2520 : pmd_t pmd = READ_ONCE(*pmdp);
2521 :
2522 : next = pmd_addr_end(addr, end);
2523 : if (!pmd_present(pmd))
2524 : return 0;
2525 :
2526 : if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2527 : pmd_devmap(pmd))) {
2528 : /*
2529 : * NUMA hinting faults need to be handled in the GUP
2530 : * slowpath for accounting purposes and so that they
2531 : * can be serialised against THP migration.
2532 : */
2533 : if (pmd_protnone(pmd))
2534 : return 0;
2535 :
2536 : if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2537 : pages, nr))
2538 : return 0;
2539 :
2540 : } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2541 : /*
2542 : * architecture have different format for hugetlbfs
2543 :                          * architectures may use different formats for the
2544 :                          * hugetlbfs pmd and the THP pmd
2545 : if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
2546 : PMD_SHIFT, next, flags, pages, nr))
2547 : return 0;
2548 : } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
2549 : return 0;
2550 : } while (pmdp++, addr = next, addr != end);
2551 :
2552 : return 1;
2553 : }
2554 :
2555 : static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
2556 : unsigned int flags, struct page **pages, int *nr)
2557 : {
2558 : unsigned long next;
2559 : pud_t *pudp;
2560 :
2561 : pudp = pud_offset_lockless(p4dp, p4d, addr);
2562 : do {
2563 : pud_t pud = READ_ONCE(*pudp);
2564 :
2565 : next = pud_addr_end(addr, end);
2566 : if (unlikely(!pud_present(pud)))
2567 : return 0;
2568 : if (unlikely(pud_huge(pud))) {
2569 : if (!gup_huge_pud(pud, pudp, addr, next, flags,
2570 : pages, nr))
2571 : return 0;
2572 : } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2573 : if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
2574 : PUD_SHIFT, next, flags, pages, nr))
2575 : return 0;
2576 : } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2577 : return 0;
2578 : } while (pudp++, addr = next, addr != end);
2579 :
2580 : return 1;
2581 : }
2582 :
2583 : static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
2584 : unsigned int flags, struct page **pages, int *nr)
2585 : {
2586 : unsigned long next;
2587 : p4d_t *p4dp;
2588 :
2589 : p4dp = p4d_offset_lockless(pgdp, pgd, addr);
2590 : do {
2591 : p4d_t p4d = READ_ONCE(*p4dp);
2592 :
2593 : next = p4d_addr_end(addr, end);
2594 : if (p4d_none(p4d))
2595 : return 0;
2596 : BUILD_BUG_ON(p4d_huge(p4d));
2597 : if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2598 : if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
2599 : P4D_SHIFT, next, flags, pages, nr))
2600 : return 0;
2601 : } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
2602 : return 0;
2603 : } while (p4dp++, addr = next, addr != end);
2604 :
2605 : return 1;
2606 : }
2607 :
2608 : static void gup_pgd_range(unsigned long addr, unsigned long end,
2609 : unsigned int flags, struct page **pages, int *nr)
2610 : {
2611 : unsigned long next;
2612 : pgd_t *pgdp;
2613 :
2614 : pgdp = pgd_offset(current->mm, addr);
2615 : do {
2616 : pgd_t pgd = READ_ONCE(*pgdp);
2617 :
2618 : next = pgd_addr_end(addr, end);
2619 : if (pgd_none(pgd))
2620 : return;
2621 : if (unlikely(pgd_huge(pgd))) {
2622 : if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
2623 : pages, nr))
2624 : return;
2625 : } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2626 : if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
2627 : PGDIR_SHIFT, next, flags, pages, nr))
2628 : return;
2629 : } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
2630 : return;
2631 : } while (pgdp++, addr = next, addr != end);
2632 : }
2633 : #else
2634 : static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2635 : unsigned int flags, struct page **pages, int *nr)
2636 : {
2637 : }
2638 : #endif /* CONFIG_HAVE_FAST_GUP */
2639 :
2640 : #ifndef gup_fast_permitted
2641 : /*
2642 : * Check if it's allowed to use get_user_pages_fast_only() for the range, or
2643 : * we need to fall back to the slow version:
2644 : */
2645 : static bool gup_fast_permitted(unsigned long start, unsigned long end)
2646 : {
2647 : return true;
2648 : }
2649 : #endif
2650 :
2651 0 : static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2652 : unsigned int gup_flags, struct page **pages)
2653 : {
2654 : int ret;
2655 :
2656 : /*
2657 : * FIXME: FOLL_LONGTERM does not work with
2658 : * get_user_pages_unlocked() (see comments in that function)
2659 : */
2660 0 : if (gup_flags & FOLL_LONGTERM) {
2661 0 : mmap_read_lock(current->mm);
2662 0 : ret = __gup_longterm_locked(current->mm,
2663 : start, nr_pages,
2664 : pages, NULL, gup_flags);
2665 0 : mmap_read_unlock(current->mm);
2666 : } else {
2667 0 : ret = get_user_pages_unlocked(start, nr_pages,
2668 : pages, gup_flags);
2669 : }
2670 :
2671 0 : return ret;
2672 : }
2673 :
2674 : static unsigned long lockless_pages_from_mm(unsigned long start,
2675 : unsigned long end,
2676 : unsigned int gup_flags,
2677 : struct page **pages)
2678 : {
2679 : unsigned long flags;
2680 0 : int nr_pinned = 0;
2681 : unsigned seq;
2682 :
2683 : if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2684 : !gup_fast_permitted(start, end))
2685 : return 0;
2686 :
2687 : if (gup_flags & FOLL_PIN) {
2688 :                 seq = raw_read_seqcount(&current->mm->write_protect_seq);
2689 : if (seq & 1)
2690 : return 0;
2691 : }
2692 :
2693 : /*
2694 : * Disable interrupts. The nested form is used, in order to allow full,
2695 : * general purpose use of this routine.
2696 : *
2697 : * With interrupts disabled, we block page table pages from being freed
2698 : * from under us. See struct mmu_table_batch comments in
2699 : * include/asm-generic/tlb.h for more details.
2700 : *
2701 : * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2702 : * that come from THPs splitting.
2703 : */
2704 : local_irq_save(flags);
2705 : gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2706 : local_irq_restore(flags);
2707 :
2708 : /*
2709 : * When pinning pages for DMA there could be a concurrent write protect
2710 : * from fork() via copy_page_range(), in this case always fail fast GUP.
2711 : */
2712 : if (gup_flags & FOLL_PIN) {
2713 :                 if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
2714 : unpin_user_pages(pages, nr_pinned);
2715 : return 0;
2716 : }
2717 : }
2718 : return nr_pinned;
2719 : }
2720 :
2721 0 : static int internal_get_user_pages_fast(unsigned long start,
2722 : unsigned long nr_pages,
2723 : unsigned int gup_flags,
2724 : struct page **pages)
2725 : {
2726 : unsigned long len, end;
2727 : unsigned long nr_pinned;
2728 : int ret;
2729 :
2730 0 : if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
2731 : FOLL_FORCE | FOLL_PIN | FOLL_GET |
2732 : FOLL_FAST_ONLY | FOLL_NOFAULT)))
2733 : return -EINVAL;
2734 :
2735 0 : if (gup_flags & FOLL_PIN)
2736 0 :                 mm_set_has_pinned_flag(&current->mm->flags);
2737 :
2738 0 : if (!(gup_flags & FOLL_FAST_ONLY))
2739 :                 might_lock_read(&current->mm->mmap_lock);
2740 :
2741 0 : start = untagged_addr(start) & PAGE_MASK;
2742 0 : len = nr_pages << PAGE_SHIFT;
2743 0 : if (check_add_overflow(start, len, &end))
2744 : return 0;
2745 0 : if (unlikely(!access_ok((void __user *)start, len)))
2746 : return -EFAULT;
2747 :
2748 0 : nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2749 0 : if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2750 : return nr_pinned;
2751 :
2752 : /* Slow path: try to get the remaining pages with get_user_pages */
2753 0 : start += nr_pinned << PAGE_SHIFT;
2754 0 : pages += nr_pinned;
2755 0 : ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
2756 : pages);
2757 : if (ret < 0) {
2758 : /*
2759 : * The caller has to unpin the pages we already pinned so
2760 : * returning -errno is not an option
2761 : */
2762 : if (nr_pinned)
2763 : return nr_pinned;
2764 : return ret;
2765 : }
2766 : return ret + nr_pinned;
2767 : }
2768 :
2769 : /**
2770 : * get_user_pages_fast_only() - pin user pages in memory
2771 : * @start: starting user address
2772 : * @nr_pages: number of pages from start to pin
2773 : * @gup_flags: flags modifying pin behaviour
2774 : * @pages: array that receives pointers to the pages pinned.
2775 : * Should be at least nr_pages long.
2776 : *
2777 : * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2778 : * the regular GUP.
2779 : * Note a difference with get_user_pages_fast: this always returns the
2780 : * number of pages pinned, 0 if no pages were pinned.
2781 : *
2782 : * If the architecture does not support this function, simply return with no
2783 : * pages pinned.
2784 : *
2785 : * Careful, careful! COW breaking can go either way, so a non-write
2786 : * access can get ambiguous page results. If you call this function without
2787 : * 'write' set, you'd better be sure that you're ok with that ambiguity.
2788 : */
2789 0 : int get_user_pages_fast_only(unsigned long start, int nr_pages,
2790 : unsigned int gup_flags, struct page **pages)
2791 : {
2792 : int nr_pinned;
2793 : /*
2794 : * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2795 : * because gup fast is always a "pin with a +1 page refcount" request.
2796 : *
2797 : * FOLL_FAST_ONLY is required in order to match the API description of
2798 : * this routine: no fall back to regular ("slow") GUP.
2799 : */
2800 0 : gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
2801 :
2802 0 : nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2803 : pages);
2804 :
2805 : /*
2806 : * As specified in the API description above, this routine is not
2807 : * allowed to return negative values. However, the common core
2808 : * routine internal_get_user_pages_fast() *can* return -errno.
2809 : * Therefore, correct for that here:
2810 : */
2811 0 : if (nr_pinned < 0)
2812 0 : nr_pinned = 0;
2813 :
2814 0 : return nr_pinned;
2815 : }
2816 : EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
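/*
 * [Illustrative example -- not part of gup.c]
 * Sketch of an opportunistic use of the _only variant: try to grab a single
 * page without falling back to the slow path, and let the caller retry from
 * a sleepable context if that fails. -EAGAIN as the retry signal is this
 * example's convention, not part of the API.
 */
static int example_try_fast_only(unsigned long uaddr, struct page **pagep)
{
	if (get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, pagep) == 1)
		return 0;
	return -EAGAIN;
}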
2817 :
2818 : /**
2819 : * get_user_pages_fast() - pin user pages in memory
2820 : * @start: starting user address
2821 : * @nr_pages: number of pages from start to pin
2822 : * @gup_flags: flags modifying pin behaviour
2823 : * @pages: array that receives pointers to the pages pinned.
2824 : * Should be at least nr_pages long.
2825 : *
2826 : * Attempt to pin user pages in memory without taking mm->mmap_lock.
2827 : * If not successful, it will fall back to taking the lock and
2828 : * calling get_user_pages().
2829 : *
2830 : * Returns number of pages pinned. This may be fewer than the number requested.
2831 : * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2832 : * -errno.
2833 : */
2834 0 : int get_user_pages_fast(unsigned long start, int nr_pages,
2835 : unsigned int gup_flags, struct page **pages)
2836 : {
2837 0 : if (!is_valid_gup_flags(gup_flags))
2838 : return -EINVAL;
2839 :
2840 : /*
2841 : * The caller may or may not have explicitly set FOLL_GET; either way is
2842 : * OK. However, internally (within mm/gup.c), gup fast variants must set
2843 : * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
2844 : * request.
2845 : */
2846 0 : gup_flags |= FOLL_GET;
2847 0 : return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2848 : }
2849 : EXPORT_SYMBOL_GPL(get_user_pages_fast);
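/*
 * [Illustrative example -- not part of gup.c]
 * Minimal sketch of the fast path: no mmap_lock in the caller, one page,
 * released with put_page() once the short-lived access is done.
 */
static int example_gup_fast_one(unsigned long uaddr, bool write,
				struct page **pagep)
{
	int ret = get_user_pages_fast(uaddr, 1, write ? FOLL_WRITE : 0, pagep);

	/* 1 on success; "no pages pinned" is reported as -errno here. */
	return ret == 1 ? 0 : (ret < 0 ? ret : -EFAULT);
}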
2850 :
2851 : /**
2852 : * pin_user_pages_fast() - pin user pages in memory without taking locks
2853 : *
2854 : * @start: starting user address
2855 : * @nr_pages: number of pages from start to pin
2856 : * @gup_flags: flags modifying pin behaviour
2857 : * @pages: array that receives pointers to the pages pinned.
2858 : * Should be at least nr_pages long.
2859 : *
2860 : * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
2861 : * get_user_pages_fast() for documentation on the function arguments, because
2862 : * the arguments here are identical.
2863 : *
2864 : * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2865 : * see Documentation/core-api/pin_user_pages.rst for further details.
2866 : */
2867 0 : int pin_user_pages_fast(unsigned long start, int nr_pages,
2868 : unsigned int gup_flags, struct page **pages)
2869 : {
2870 : /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2871 0 : if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2872 : return -EINVAL;
2873 :
2874 0 : gup_flags |= FOLL_PIN;
2875 0 : return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2876 : }
2877 : EXPORT_SYMBOL_GPL(pin_user_pages_fast);
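/*
 * [Illustrative example -- not part of gup.c]
 * Sketch of a DMA-style pin as described in pin_user_pages.rst: FOLL_PIN
 * (plus FOLL_LONGTERM for long-lived mappings), released with the
 * unpin_user_pages*() helpers rather than put_page().
 */
static int example_pin_for_dma(unsigned long uaddr, int nr, struct page **pages)
{
	int pinned = pin_user_pages_fast(uaddr, nr,
					 FOLL_WRITE | FOLL_LONGTERM, pages);

	if (pinned < 0)
		return pinned;
	if (pinned != nr) {
		/* Partial pin: a real caller would unwind and report it. */
		unpin_user_pages(pages, pinned);
		return -EFAULT;
	}

	/* ... program the device and wait for the transfer ... */

	/* Drop the pins; 'true' also marks the pages dirty. */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	return 0;
}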
2878 :
2879 : /*
2880 : * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
2881 : * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
2882 : *
2883 : * The API rules are the same, too: no negative values may be returned.
2884 : */
2885 0 : int pin_user_pages_fast_only(unsigned long start, int nr_pages,
2886 : unsigned int gup_flags, struct page **pages)
2887 : {
2888 : int nr_pinned;
2889 :
2890 : /*
2891 : * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
2892 : * rules require returning 0, rather than -errno:
2893 : */
2894 0 : if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2895 : return 0;
2896 : /*
2897 : * FOLL_FAST_ONLY is required in order to match the API description of
2898 : * this routine: no fall back to regular ("slow") GUP.
2899 : */
2900 0 : gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
2901 0 : nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2902 : pages);
2903 : /*
2904 : * This routine is not allowed to return negative values. However,
2905 : * internal_get_user_pages_fast() *can* return -errno. Therefore,
2906 : * correct for that here:
2907 : */
2908 0 : if (nr_pinned < 0)
2909 0 : nr_pinned = 0;
2910 :
2911 : return nr_pinned;
2912 : }
2913 : EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
2914 :
2915 : /**
2916 : * pin_user_pages_remote() - pin pages of a remote process
2917 : *
2918 : * @mm: mm_struct of target mm
2919 : * @start: starting user address
2920 : * @nr_pages: number of pages from start to pin
2921 : * @gup_flags: flags modifying lookup behaviour
2922 : * @pages: array that receives pointers to the pages pinned.
2923 : * Should be at least nr_pages long. Or NULL, if caller
2924 : * only intends to ensure the pages are faulted in.
2925 : * @vmas: array of pointers to vmas corresponding to each page.
2926 : * Or NULL if the caller does not require them.
2927 : * @locked: pointer to lock flag indicating whether lock is held and
2928 : * subsequently whether VM_FAULT_RETRY functionality can be
2929 : * utilised. Lock must initially be held.
2930 : *
2931 : * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
2932 : * get_user_pages_remote() for documentation on the function arguments, because
2933 : * the arguments here are identical.
2934 : *
2935 : * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2936 : * see Documentation/core-api/pin_user_pages.rst for details.
2937 : */
2938 0 : long pin_user_pages_remote(struct mm_struct *mm,
2939 : unsigned long start, unsigned long nr_pages,
2940 : unsigned int gup_flags, struct page **pages,
2941 : struct vm_area_struct **vmas, int *locked)
2942 : {
2943 : /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2944 0 : if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2945 : return -EINVAL;
2946 :
2947 0 : gup_flags |= FOLL_PIN;
2948 0 : return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
2949 : pages, vmas, locked);
2950 : }
2951 : EXPORT_SYMBOL(pin_user_pages_remote);
2952 :
2953 : /**
2954 : * pin_user_pages() - pin user pages in memory for use by other devices
2955 : *
2956 : * @start: starting user address
2957 : * @nr_pages: number of pages from start to pin
2958 : * @gup_flags: flags modifying lookup behaviour
2959 : * @pages: array that receives pointers to the pages pinned.
2960 : * Should be at least nr_pages long. Or NULL, if caller
2961 : * only intends to ensure the pages are faulted in.
2962 : * @vmas: array of pointers to vmas corresponding to each page.
2963 : * Or NULL if the caller does not require them.
2964 : *
2965 : * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
2966 : * FOLL_PIN is set.
2967 : *
2968 : * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2969 : * see Documentation/core-api/pin_user_pages.rst for details.
2970 : */
2971 0 : long pin_user_pages(unsigned long start, unsigned long nr_pages,
2972 : unsigned int gup_flags, struct page **pages,
2973 : struct vm_area_struct **vmas)
2974 : {
2975 : /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2976 0 : if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2977 : return -EINVAL;
2978 :
2979 0 : gup_flags |= FOLL_PIN;
2980 0 : return __gup_longterm_locked(current->mm, start, nr_pages,
2981 : pages, vmas, gup_flags);
2982 : }
2983 : EXPORT_SYMBOL(pin_user_pages);
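/*
 * [Illustrative example -- not part of gup.c]
 * Sketch of the slow-path FOLL_PIN variant for the current mm: unlike
 * pin_user_pages_fast() above, the caller holds mmap_lock (and may also ask
 * for the VMAs). The pins are dropped with unpin_user_pages().
 */
static long example_pin_current(unsigned long uaddr, unsigned long nr,
				struct page **pages)
{
	long pinned;

	mmap_read_lock(current->mm);
	pinned = pin_user_pages(uaddr, nr, FOLL_WRITE, pages, NULL);
	mmap_read_unlock(current->mm);

	if (pinned < 0)
		return pinned;

	/* ... use the pinned pages ... */

	unpin_user_pages(pages, pinned);
	return pinned;
}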
2984 :
2985 : /*
2986 : * pin_user_pages_unlocked() is the FOLL_PIN variant of
2987 : * get_user_pages_unlocked(). Behavior is the same, except that this one sets
2988 : * FOLL_PIN and rejects FOLL_GET.
2989 : */
2990 0 : long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2991 : struct page **pages, unsigned int gup_flags)
2992 : {
2993 : /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2994 0 : if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2995 : return -EINVAL;
2996 :
2997 0 : gup_flags |= FOLL_PIN;
2998 0 : return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
2999 : }
3000 : EXPORT_SYMBOL(pin_user_pages_unlocked);