LCOV - code coverage report
Current view: top level - mm - mremap.c (source / functions)
Test: coverage.info
Date: 2022-12-09 01:23:36
Lines:     0 / 274 hit (0.0 %)
Functions: 0 / 12 hit (0.0 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  *      mm/mremap.c
       4             :  *
       5             :  *      (C) Copyright 1996 Linus Torvalds
       6             :  *
       7             :  *      Address space accounting code   <alan@lxorguk.ukuu.org.uk>
       8             :  *      (C) Copyright 2002 Red Hat Inc, All Rights Reserved
       9             :  */
      10             : 
      11             : #include <linux/mm.h>
      12             : #include <linux/hugetlb.h>
      13             : #include <linux/shm.h>
      14             : #include <linux/ksm.h>
      15             : #include <linux/mman.h>
      16             : #include <linux/swap.h>
      17             : #include <linux/capability.h>
      18             : #include <linux/fs.h>
      19             : #include <linux/swapops.h>
      20             : #include <linux/highmem.h>
      21             : #include <linux/security.h>
      22             : #include <linux/syscalls.h>
      23             : #include <linux/mmu_notifier.h>
      24             : #include <linux/uaccess.h>
      25             : #include <linux/userfaultfd_k.h>
      26             : 
      27             : #include <asm/cacheflush.h>
      28             : #include <asm/tlb.h>
      29             : #include <asm/pgalloc.h>
      30             : 
      31             : #include "internal.h"
      32             : 
      33             : static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
      34             : {
      35             :         pgd_t *pgd;
      36             :         p4d_t *p4d;
      37             :         pud_t *pud;
      38             : 
      39           0 :         pgd = pgd_offset(mm, addr);
      40             :         if (pgd_none_or_clear_bad(pgd))
      41             :                 return NULL;
      42             : 
      43           0 :         p4d = p4d_offset(pgd, addr);
      44             :         if (p4d_none_or_clear_bad(p4d))
      45             :                 return NULL;
      46             : 
      47           0 :         pud = pud_offset(p4d, addr);
      48           0 :         if (pud_none_or_clear_bad(pud))
      49             :                 return NULL;
      50             : 
      51             :         return pud;
      52             : }
      53             : 
      54           0 : static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
      55             : {
      56             :         pud_t *pud;
      57             :         pmd_t *pmd;
      58             : 
      59           0 :         pud = get_old_pud(mm, addr);
      60           0 :         if (!pud)
      61             :                 return NULL;
      62             : 
      63           0 :         pmd = pmd_offset(pud, addr);
      64           0 :         if (pmd_none(*pmd))
      65             :                 return NULL;
      66             : 
      67           0 :         return pmd;
      68             : }
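
get_old_pud() and get_old_pmd() walk the existing page tables top-down (pgd -> p4d -> pud -> pmd), giving up as soon as a level is empty. A rough sketch of how the same pattern would continue one level further, down to the PTE; this helper is illustrative only and not part of mremap.c, which instead maps and locks the PTE level in move_ptes() via pte_offset_map_lock():

    /* Illustrative sketch only: continue the same walk to the PTE level. */
    static pte_t *get_old_pte(struct mm_struct *mm, unsigned long addr)
    {
            pmd_t *pmd;

            pmd = get_old_pmd(mm, addr);
            if (!pmd)
                    return NULL;

            /* A real caller would still need the PTE lock before using it. */
            return pte_offset_map(pmd, addr);
    }
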
      69             : 
      70             : static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
      71             :                             unsigned long addr)
      72             : {
      73             :         pgd_t *pgd;
      74             :         p4d_t *p4d;
      75             : 
      76           0 :         pgd = pgd_offset(mm, addr);
      77           0 :         p4d = p4d_alloc(mm, pgd, addr);
      78           0 :         if (!p4d)
      79             :                 return NULL;
      80             : 
      81           0 :         return pud_alloc(mm, p4d, addr);
      82             : }
      83             : 
      84           0 : static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
      85             :                             unsigned long addr)
      86             : {
      87             :         pud_t *pud;
      88             :         pmd_t *pmd;
      89             : 
      90           0 :         pud = alloc_new_pud(mm, vma, addr);
      91           0 :         if (!pud)
      92             :                 return NULL;
      93             : 
      94           0 :         pmd = pmd_alloc(mm, pud, addr);
      95           0 :         if (!pmd)
      96             :                 return NULL;
      97             : 
      98             :         VM_BUG_ON(pmd_trans_huge(*pmd));
      99             : 
     100             :         return pmd;
     101             : }
     102             : 
     103           0 : static void take_rmap_locks(struct vm_area_struct *vma)
     104             : {
     105           0 :         if (vma->vm_file)
     106           0 :                 i_mmap_lock_write(vma->vm_file->f_mapping);
     107           0 :         if (vma->anon_vma)
     108           0 :                 anon_vma_lock_write(vma->anon_vma);
     109           0 : }
     110             : 
     111           0 : static void drop_rmap_locks(struct vm_area_struct *vma)
     112             : {
     113           0 :         if (vma->anon_vma)
     114           0 :                 anon_vma_unlock_write(vma->anon_vma);
     115           0 :         if (vma->vm_file)
     116           0 :                 i_mmap_unlock_write(vma->vm_file->f_mapping);
     117           0 : }
     118             : 
     119             : static pte_t move_soft_dirty_pte(pte_t pte)
     120             : {
     121             :         /*
      122             :          * Set the soft-dirty bit so userspace can notice
      123             :          * that the ptes were moved.
     124             :          */
     125             : #ifdef CONFIG_MEM_SOFT_DIRTY
     126             :         if (pte_present(pte))
     127             :                 pte = pte_mksoft_dirty(pte);
     128             :         else if (is_swap_pte(pte))
     129             :                 pte = pte_swp_mksoft_dirty(pte);
     130             : #endif
     131             :         return pte;
     132             : }
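
move_soft_dirty_pte() keeps the moved range visible to soft-dirty tracking. A hedged userspace sketch of how such bits are typically observed, assuming CONFIG_MEM_SOFT_DIRTY and the documented /proc/<pid>/pagemap layout (Documentation/admin-guide/mm/soft-dirty.rst, bit 55 of each 64-bit entry is the soft-dirty flag; the bits can be cleared beforehand by writing "4" to /proc/<pid>/clear_refs):

    /* Sketch: report whether the page containing addr is marked soft-dirty.
     * Assumes CONFIG_MEM_SOFT_DIRTY and the documented pagemap format. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    static int page_soft_dirty(void *addr)
    {
            uint64_t entry;
            long psize = sysconf(_SC_PAGESIZE);
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd < 0)
                    return -1;
            if (pread(fd, &entry, sizeof(entry),
                      ((uintptr_t)addr / psize) * sizeof(entry)) !=
                (ssize_t)sizeof(entry)) {
                    close(fd);
                    return -1;
            }
            close(fd);
            return (entry >> 55) & 1;    /* bit 55: pte is soft-dirty */
    }

After mremap() moves a range, the soft-dirty bit shows up on the new ptes, which is exactly what move_soft_dirty_pte() arranges.
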
     133             : 
     134           0 : static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
     135             :                 unsigned long old_addr, unsigned long old_end,
     136             :                 struct vm_area_struct *new_vma, pmd_t *new_pmd,
     137             :                 unsigned long new_addr, bool need_rmap_locks)
     138             : {
     139           0 :         struct mm_struct *mm = vma->vm_mm;
     140             :         pte_t *old_pte, *new_pte, pte;
     141             :         spinlock_t *old_ptl, *new_ptl;
     142           0 :         bool force_flush = false;
     143           0 :         unsigned long len = old_end - old_addr;
     144             : 
     145             :         /*
     146             :          * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
     147             :          * locks to ensure that rmap will always observe either the old or the
     148             :          * new ptes. This is the easiest way to avoid races with
     149             :          * truncate_pagecache(), page migration, etc...
     150             :          *
     151             :          * When need_rmap_locks is false, we use other ways to avoid
     152             :          * such races:
     153             :          *
     154             :          * - During exec() shift_arg_pages(), we use a specially tagged vma
     155             :          *   which rmap call sites look for using vma_is_temporary_stack().
     156             :          *
     157             :          * - During mremap(), new_vma is often known to be placed after vma
     158             :          *   in rmap traversal order. This ensures rmap will always observe
     159             :          *   either the old pte, or the new pte, or both (the page table locks
     160             :          *   serialize access to individual ptes, but only rmap traversal
     161             :          *   order guarantees that we won't miss both the old and new ptes).
     162             :          */
     163           0 :         if (need_rmap_locks)
     164           0 :                 take_rmap_locks(vma);
     165             : 
     166             :         /*
     167             :          * We don't have to worry about the ordering of src and dst
     168             :          * pte locks because exclusive mmap_lock prevents deadlock.
     169             :          */
     170           0 :         old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
     171           0 :         new_pte = pte_offset_map(new_pmd, new_addr);
     172           0 :         new_ptl = pte_lockptr(mm, new_pmd);
     173             :         if (new_ptl != old_ptl)
     174             :                 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
     175           0 :         flush_tlb_batched_pending(vma->vm_mm);
     176             :         arch_enter_lazy_mmu_mode();
     177             : 
     178           0 :         for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
     179           0 :                                    new_pte++, new_addr += PAGE_SIZE) {
     180           0 :                 if (pte_none(*old_pte))
     181           0 :                         continue;
     182             : 
     183           0 :                 pte = ptep_get_and_clear(mm, old_addr, old_pte);
     184             :                 /*
     185             :                  * If we are remapping a valid PTE, make sure
     186             :                  * to flush TLB before we drop the PTL for the
     187             :                  * PTE.
     188             :                  *
     189             :                  * NOTE! Both old and new PTL matter: the old one
     190             :                  * for racing with page_mkclean(), the new one to
     191             :                  * make sure the physical page stays valid until
     192             :                  * the TLB entry for the old mapping has been
     193             :                  * flushed.
     194             :                  */
     195           0 :                 if (pte_present(pte))
     196           0 :                         force_flush = true;
     197           0 :                 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
     198             :                 pte = move_soft_dirty_pte(pte);
     199           0 :                 set_pte_at(mm, new_addr, new_pte, pte);
     200             :         }
     201             : 
     202             :         arch_leave_lazy_mmu_mode();
     203           0 :         if (force_flush)
     204           0 :                 flush_tlb_range(vma, old_end - len, old_end);
     205             :         if (new_ptl != old_ptl)
     206             :                 spin_unlock(new_ptl);
     207             :         pte_unmap(new_pte - 1);
     208           0 :         pte_unmap_unlock(old_pte - 1, old_ptl);
     209           0 :         if (need_rmap_locks)
     210           0 :                 drop_rmap_locks(vma);
     211           0 : }
     212             : 
     213             : #ifndef arch_supports_page_table_move
     214             : #define arch_supports_page_table_move arch_supports_page_table_move
     215             : static inline bool arch_supports_page_table_move(void)
     216             : {
     217             :         return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
     218             :                 IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
     219             : }
     220             : #endif
     221             : 
     222             : #ifdef CONFIG_HAVE_MOVE_PMD
     223             : static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
     224             :                   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
     225             : {
     226             :         spinlock_t *old_ptl, *new_ptl;
     227             :         struct mm_struct *mm = vma->vm_mm;
     228             :         pmd_t pmd;
     229             : 
     230             :         if (!arch_supports_page_table_move())
     231             :                 return false;
     232             :         /*
     233             :          * The destination pmd shouldn't be established, free_pgtables()
     234             :          * should have released it.
     235             :          *
     236             :          * However, there's a case during execve() where we use mremap
     237             :          * to move the initial stack, and in that case the target area
     238             :          * may overlap the source area (always moving down).
     239             :          *
     240             :          * If everything is PMD-aligned, that works fine, as moving
     241             :          * each pmd down will clear the source pmd. But if we first
     242             :          * have a few 4kB-only pages that get moved down, and then
     243             :          * hit the "now the rest is PMD-aligned, let's do everything
     244             :          * one pmd at a time", we will still have the old (now empty
     245             :          * of any 4kB pages, but still there) PMD in the page table
     246             :          * tree.
     247             :          *
     248             :          * Warn on it once - because we really should try to figure
     249             :          * out how to do this better - but then say "I won't move
     250             :          * this pmd".
     251             :          *
     252             :          * One alternative might be to just unmap the target pmd at
     253             :          * this point, and verify that it really is empty. We'll see.
     254             :          */
     255             :         if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
     256             :                 return false;
     257             : 
     258             :         /*
     259             :          * We don't have to worry about the ordering of src and dst
     260             :          * ptlocks because exclusive mmap_lock prevents deadlock.
     261             :          */
     262             :         old_ptl = pmd_lock(vma->vm_mm, old_pmd);
     263             :         new_ptl = pmd_lockptr(mm, new_pmd);
     264             :         if (new_ptl != old_ptl)
     265             :                 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
     266             : 
     267             :         /* Clear the pmd */
     268             :         pmd = *old_pmd;
     269             :         pmd_clear(old_pmd);
     270             : 
     271             :         VM_BUG_ON(!pmd_none(*new_pmd));
     272             : 
     273             :         pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
     274             :         flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
     275             :         if (new_ptl != old_ptl)
     276             :                 spin_unlock(new_ptl);
     277             :         spin_unlock(old_ptl);
     278             : 
     279             :         return true;
     280             : }
     281             : #else
     282             : static inline bool move_normal_pmd(struct vm_area_struct *vma,
     283             :                 unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
     284             :                 pmd_t *new_pmd)
     285             : {
     286             :         return false;
     287             : }
     288             : #endif
     289             : 
     290             : #if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
     291             : static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
     292             :                   unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
     293             : {
     294             :         spinlock_t *old_ptl, *new_ptl;
     295             :         struct mm_struct *mm = vma->vm_mm;
     296             :         pud_t pud;
     297             : 
     298             :         if (!arch_supports_page_table_move())
     299             :                 return false;
     300             :         /*
     301             :          * The destination pud shouldn't be established, free_pgtables()
     302             :          * should have released it.
     303             :          */
     304             :         if (WARN_ON_ONCE(!pud_none(*new_pud)))
     305             :                 return false;
     306             : 
     307             :         /*
     308             :          * We don't have to worry about the ordering of src and dst
     309             :          * ptlocks because exclusive mmap_lock prevents deadlock.
     310             :          */
     311             :         old_ptl = pud_lock(vma->vm_mm, old_pud);
     312             :         new_ptl = pud_lockptr(mm, new_pud);
     313             :         if (new_ptl != old_ptl)
     314             :                 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
     315             : 
     316             :         /* Clear the pud */
     317             :         pud = *old_pud;
     318             :         pud_clear(old_pud);
     319             : 
     320             :         VM_BUG_ON(!pud_none(*new_pud));
     321             : 
     322             :         pud_populate(mm, new_pud, pud_pgtable(pud));
     323             :         flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
     324             :         if (new_ptl != old_ptl)
     325             :                 spin_unlock(new_ptl);
     326             :         spin_unlock(old_ptl);
     327             : 
     328             :         return true;
     329             : }
     330             : #else
     331             : static inline bool move_normal_pud(struct vm_area_struct *vma,
     332             :                 unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
     333             :                 pud_t *new_pud)
     334             : {
     335             :         return false;
     336             : }
     337             : #endif
     338             : 
     339             : #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
     340             : static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
     341             :                           unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
     342             : {
     343             :         spinlock_t *old_ptl, *new_ptl;
     344             :         struct mm_struct *mm = vma->vm_mm;
     345             :         pud_t pud;
     346             : 
     347             :         /*
     348             :          * The destination pud shouldn't be established, free_pgtables()
     349             :          * should have released it.
     350             :          */
     351             :         if (WARN_ON_ONCE(!pud_none(*new_pud)))
     352             :                 return false;
     353             : 
     354             :         /*
     355             :          * We don't have to worry about the ordering of src and dst
     356             :          * ptlocks because exclusive mmap_lock prevents deadlock.
     357             :          */
     358             :         old_ptl = pud_lock(vma->vm_mm, old_pud);
     359             :         new_ptl = pud_lockptr(mm, new_pud);
     360             :         if (new_ptl != old_ptl)
     361             :                 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
     362             : 
     363             :         /* Clear the pud */
     364             :         pud = *old_pud;
     365             :         pud_clear(old_pud);
     366             : 
     367             :         VM_BUG_ON(!pud_none(*new_pud));
     368             : 
     369             :         /* Set the new pud */
      370             :         /* mark soft_dirty when we add pud level soft dirty support */
     371             :         set_pud_at(mm, new_addr, new_pud, pud);
     372             :         flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
     373             :         if (new_ptl != old_ptl)
     374             :                 spin_unlock(new_ptl);
     375             :         spin_unlock(old_ptl);
     376             : 
     377             :         return true;
     378             : }
     379             : #else
     380             : static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
     381             :                           unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
     382             : {
     383             :         WARN_ON_ONCE(1);
     384             :         return false;
     385             : 
     386             : }
     387             : #endif
     388             : 
     389             : enum pgt_entry {
     390             :         NORMAL_PMD,
     391             :         HPAGE_PMD,
     392             :         NORMAL_PUD,
     393             :         HPAGE_PUD,
     394             : };
     395             : 
     396             : /*
     397             :  * Returns an extent of the corresponding size for the pgt_entry specified if
     398             :  * valid. Else returns a smaller extent bounded by the end of the source and
     399             :  * destination pgt_entry.
     400             :  */
     401             : static __always_inline unsigned long get_extent(enum pgt_entry entry,
     402             :                         unsigned long old_addr, unsigned long old_end,
     403             :                         unsigned long new_addr)
     404             : {
     405             :         unsigned long next, extent, mask, size;
     406             : 
     407             :         switch (entry) {
     408             :         case HPAGE_PMD:
     409             :         case NORMAL_PMD:
     410             :                 mask = PMD_MASK;
     411             :                 size = PMD_SIZE;
     412             :                 break;
     413             :         case HPAGE_PUD:
     414             :         case NORMAL_PUD:
     415           0 :                 mask = PUD_MASK;
     416           0 :                 size = PUD_SIZE;
     417             :                 break;
     418             :         default:
     419             :                 BUILD_BUG();
     420             :                 break;
     421             :         }
     422             : 
     423           0 :         next = (old_addr + size) & mask;
     424             :         /* even if next overflowed, extent below will be ok */
     425           0 :         extent = next - old_addr;
     426           0 :         if (extent > old_end - old_addr)
     427           0 :                 extent = old_end - old_addr;
     428           0 :         next = (new_addr + size) & mask;
     429           0 :         if (extent > next - new_addr)
     430           0 :                 extent = next - new_addr;
     431             :         return extent;
     432             : }
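
A standalone worked example of the PMD-level arithmetic in get_extent(), assuming x86-64 defaults (4 KiB pages, PMD_SIZE = 2 MiB); the addresses are made up for illustration:

    /* Sketch: the PMD case of get_extent(), with concrete numbers. */
    #include <stdio.h>

    #define PMD_SIZE 0x200000UL
    #define PMD_MASK (~(PMD_SIZE - 1))

    static unsigned long pmd_extent(unsigned long old_addr, unsigned long old_end,
                                    unsigned long new_addr)
    {
            unsigned long next, extent;

            next = (old_addr + PMD_SIZE) & PMD_MASK; /* next boundary after old_addr */
            extent = next - old_addr;
            if (extent > old_end - old_addr)         /* don't run past the source end */
                    extent = old_end - old_addr;
            next = (new_addr + PMD_SIZE) & PMD_MASK; /* nor past the destination boundary */
            if (extent > next - new_addr)
                    extent = next - new_addr;
            return extent;
    }

    int main(void)
    {
            /* 0x1ff000 bytes remain until the next 2 MiB boundary on both sides. */
            printf("%#lx\n", pmd_extent(0x7f0000001000UL, 0x7f0000400000UL,
                                        0x7f0000801000UL));
            return 0;
    }
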
     433             : 
     434             : /*
      435             :  * Attempts to speed up the move by moving the entry at the level corresponding
      436             :  * to pgt_entry. Returns true if the move was successful, else false.
     437             :  */
     438             : static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
     439             :                         unsigned long old_addr, unsigned long new_addr,
     440             :                         void *old_entry, void *new_entry, bool need_rmap_locks)
     441             : {
     442             :         bool moved = false;
     443             : 
     444             :         /* See comment in move_ptes() */
     445             :         if (need_rmap_locks)
     446             :                 take_rmap_locks(vma);
     447             : 
     448             :         switch (entry) {
     449             :         case NORMAL_PMD:
     450             :                 moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
     451             :                                         new_entry);
     452             :                 break;
     453             :         case NORMAL_PUD:
     454             :                 moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
     455             :                                         new_entry);
     456             :                 break;
     457             :         case HPAGE_PMD:
     458             :                 moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
     459             :                         move_huge_pmd(vma, old_addr, new_addr, old_entry,
     460             :                                       new_entry);
     461             :                 break;
     462             :         case HPAGE_PUD:
     463             :                 moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
     464             :                         move_huge_pud(vma, old_addr, new_addr, old_entry,
     465             :                                       new_entry);
     466             :                 break;
     467             : 
     468             :         default:
     469             :                 WARN_ON_ONCE(1);
     470             :                 break;
     471             :         }
     472             : 
     473             :         if (need_rmap_locks)
     474             :                 drop_rmap_locks(vma);
     475             : 
     476             :         return moved;
     477             : }
     478             : 
     479           0 : unsigned long move_page_tables(struct vm_area_struct *vma,
     480             :                 unsigned long old_addr, struct vm_area_struct *new_vma,
     481             :                 unsigned long new_addr, unsigned long len,
     482             :                 bool need_rmap_locks)
     483             : {
     484             :         unsigned long extent, old_end;
     485             :         struct mmu_notifier_range range;
     486             :         pmd_t *old_pmd, *new_pmd;
     487             :         pud_t *old_pud, *new_pud;
     488             : 
     489           0 :         if (!len)
     490             :                 return 0;
     491             : 
     492           0 :         old_end = old_addr + len;
     493           0 :         flush_cache_range(vma, old_addr, old_end);
     494             : 
     495           0 :         if (is_vm_hugetlb_page(vma))
     496             :                 return move_hugetlb_page_tables(vma, new_vma, old_addr,
     497             :                                                 new_addr, len);
     498             : 
     499             :         mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
     500             :                                 old_addr, old_end);
     501             :         mmu_notifier_invalidate_range_start(&range);
     502             : 
     503           0 :         for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
     504           0 :                 cond_resched();
     505             :                 /*
     506             :                  * If extent is PUD-sized try to speed up the move by moving at the
     507             :                  * PUD level if possible.
     508             :                  */
     509           0 :                 extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
     510             : 
     511           0 :                 old_pud = get_old_pud(vma->vm_mm, old_addr);
     512           0 :                 if (!old_pud)
     513           0 :                         continue;
     514           0 :                 new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
     515           0 :                 if (!new_pud)
     516             :                         break;
     517             :                 if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
     518             :                         if (extent == HPAGE_PUD_SIZE) {
     519             :                                 move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
     520             :                                                old_pud, new_pud, need_rmap_locks);
     521             :                                 /* We ignore and continue on error? */
     522             :                                 continue;
     523             :                         }
     524             :                 } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
     525             : 
     526             :                         if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
     527             :                                            old_pud, new_pud, true))
     528             :                                 continue;
     529             :                 }
     530             : 
     531           0 :                 extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
     532           0 :                 old_pmd = get_old_pmd(vma->vm_mm, old_addr);
     533           0 :                 if (!old_pmd)
     534           0 :                         continue;
     535           0 :                 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
     536           0 :                 if (!new_pmd)
     537             :                         break;
     538           0 :                 if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
     539           0 :                     pmd_devmap(*old_pmd)) {
     540             :                         if (extent == HPAGE_PMD_SIZE &&
     541             :                             move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
     542             :                                            old_pmd, new_pmd, need_rmap_locks))
     543             :                                 continue;
     544             :                         split_huge_pmd(vma, old_pmd, old_addr);
     545             :                         if (pmd_trans_unstable(old_pmd))
     546             :                                 continue;
     547             :                 } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
     548             :                            extent == PMD_SIZE) {
     549             :                         /*
     550             :                          * If the extent is PMD-sized, try to speed the move by
     551             :                          * moving at the PMD level if possible.
     552             :                          */
     553             :                         if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
     554             :                                            old_pmd, new_pmd, true))
     555             :                                 continue;
     556             :                 }
     557             : 
     558           0 :                 if (pte_alloc(new_vma->vm_mm, new_pmd))
     559             :                         break;
     560           0 :                 move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
     561             :                           new_pmd, new_addr, need_rmap_locks);
     562             :         }
     563             : 
     564           0 :         mmu_notifier_invalidate_range_end(&range);
     565             : 
     566           0 :         return len + old_addr - old_end;        /* how much done */
     567             : }
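
The return value is phrased as len + old_addr - old_end so that a partial move reports only the bytes actually moved; move_vma() treats moved_len < old_len as an error and moves the entries back. A tiny worked example with made-up numbers:

    /* Sketch of the "how much done" arithmetic, with invented values. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long len      = 0x400000;           /* requested move: 4 MiB */
            unsigned long old_end  = 0x7f0000400000UL;
            unsigned long old_addr = 0x7f0000200000UL;   /* loop stopped half-way */

            printf("%#lx\n", len + old_addr - old_end);  /* 0x200000: 2 MiB moved */
            return 0;
    }
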
     568             : 
     569           0 : static unsigned long move_vma(struct vm_area_struct *vma,
     570             :                 unsigned long old_addr, unsigned long old_len,
     571             :                 unsigned long new_len, unsigned long new_addr,
     572             :                 bool *locked, unsigned long flags,
     573             :                 struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
     574             : {
     575           0 :         long to_account = new_len - old_len;
     576           0 :         struct mm_struct *mm = vma->vm_mm;
     577             :         struct vm_area_struct *new_vma;
     578           0 :         unsigned long vm_flags = vma->vm_flags;
     579             :         unsigned long new_pgoff;
     580             :         unsigned long moved_len;
     581           0 :         unsigned long excess = 0;
     582             :         unsigned long hiwater_vm;
     583           0 :         int split = 0;
     584           0 :         int err = 0;
     585             :         bool need_rmap_locks;
     586             : 
     587             :         /*
     588             :          * We'd prefer to avoid failure later on in do_munmap:
     589             :          * which may split one vma into three before unmapping.
     590             :          */
     591           0 :         if (mm->map_count >= sysctl_max_map_count - 3)
     592             :                 return -ENOMEM;
     593             : 
     594           0 :         if (unlikely(flags & MREMAP_DONTUNMAP))
     595           0 :                 to_account = new_len;
     596             : 
     597           0 :         if (vma->vm_ops && vma->vm_ops->may_split) {
     598           0 :                 if (vma->vm_start != old_addr)
     599           0 :                         err = vma->vm_ops->may_split(vma, old_addr);
     600           0 :                 if (!err && vma->vm_end != old_addr + old_len)
     601           0 :                         err = vma->vm_ops->may_split(vma, old_addr + old_len);
     602           0 :                 if (err)
     603           0 :                         return err;
     604             :         }
     605             : 
     606             :         /*
     607             :          * Advise KSM to break any KSM pages in the area to be moved:
     608             :          * it would be confusing if they were to turn up at the new
     609             :          * location, where they happen to coincide with different KSM
     610             :          * pages recently unmapped.  But leave vma->vm_flags as it was,
     611             :          * so KSM can come around to merge on vma and new_vma afterwards.
     612             :          */
     613           0 :         err = ksm_madvise(vma, old_addr, old_addr + old_len,
     614             :                                                 MADV_UNMERGEABLE, &vm_flags);
     615             :         if (err)
     616             :                 return err;
     617             : 
     618           0 :         if (vm_flags & VM_ACCOUNT) {
     619           0 :                 if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT))
     620             :                         return -ENOMEM;
     621             :         }
     622             : 
     623           0 :         new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
     624           0 :         new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
     625             :                            &need_rmap_locks);
     626           0 :         if (!new_vma) {
     627           0 :                 if (vm_flags & VM_ACCOUNT)
     628           0 :                         vm_unacct_memory(to_account >> PAGE_SHIFT);
     629             :                 return -ENOMEM;
     630             :         }
     631             : 
     632           0 :         moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
     633             :                                      need_rmap_locks);
     634           0 :         if (moved_len < old_len) {
     635             :                 err = -ENOMEM;
     636           0 :         } else if (vma->vm_ops && vma->vm_ops->mremap) {
     637           0 :                 err = vma->vm_ops->mremap(new_vma);
     638             :         }
     639             : 
     640           0 :         if (unlikely(err)) {
     641             :                 /*
     642             :                  * On error, move entries back from new area to old,
     643             :                  * which will succeed since page tables still there,
     644             :                  * and then proceed to unmap new area instead of old.
     645             :                  */
     646           0 :                 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
     647             :                                  true);
     648           0 :                 vma = new_vma;
     649           0 :                 old_len = new_len;
     650           0 :                 old_addr = new_addr;
     651           0 :                 new_addr = err;
     652             :         } else {
     653             :                 mremap_userfaultfd_prep(new_vma, uf);
     654             :         }
     655             : 
     656           0 :         if (is_vm_hugetlb_page(vma)) {
     657             :                 clear_vma_resv_huge_pages(vma);
     658             :         }
     659             : 
     660             :         /* Conceal VM_ACCOUNT so old reservation is not undone */
     661           0 :         if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
     662           0 :                 vma->vm_flags &= ~VM_ACCOUNT;
     663           0 :                 excess = vma->vm_end - vma->vm_start - old_len;
     664           0 :                 if (old_addr > vma->vm_start &&
     665           0 :                     old_addr + old_len < vma->vm_end)
     666           0 :                         split = 1;
     667             :         }
     668             : 
     669             :         /*
     670             :          * If we failed to move page tables we still do total_vm increment
     671             :          * since do_munmap() will decrement it by old_len == new_len.
     672             :          *
     673             :          * Since total_vm is about to be raised artificially high for a
     674             :          * moment, we need to restore high watermark afterwards: if stats
     675             :          * are taken meanwhile, total_vm and hiwater_vm appear too high.
     676             :          * If this were a serious issue, we'd add a flag to do_munmap().
     677             :          */
     678           0 :         hiwater_vm = mm->hiwater_vm;
     679           0 :         vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
     680             : 
      681             :         /* Tell the pfn-tracking code that the pfnmap has moved from this vma */
     682           0 :         if (unlikely(vma->vm_flags & VM_PFNMAP))
     683             :                 untrack_pfn_moved(vma);
     684             : 
     685           0 :         if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
     686             :                 /* We always clear VM_LOCKED[ONFAULT] on the old vma */
     687           0 :                 vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
     688             : 
     689             :                 /*
      690             :                  * The anon_vma links of the old vma are no longer needed once its
      691             :                  * page tables have been moved.
     692             :                  */
     693           0 :                 if (new_vma != vma && vma->vm_start == old_addr &&
     694           0 :                         vma->vm_end == (old_addr + old_len))
     695           0 :                         unlink_anon_vmas(vma);
     696             : 
     697             :                 /* Because we won't unmap we don't need to touch locked_vm */
     698             :                 return new_addr;
     699             :         }
     700             : 
     701           0 :         if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
     702             :                 /* OOM: unable to split vma, just get accounts right */
     703           0 :                 if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
     704           0 :                         vm_acct_memory(old_len >> PAGE_SHIFT);
     705             :                 excess = 0;
     706             :         }
     707             : 
     708           0 :         if (vm_flags & VM_LOCKED) {
     709           0 :                 mm->locked_vm += new_len >> PAGE_SHIFT;
     710           0 :                 *locked = true;
     711             :         }
     712             : 
     713           0 :         mm->hiwater_vm = hiwater_vm;
     714             : 
     715             :         /* Restore VM_ACCOUNT if one or two pieces of vma left */
     716           0 :         if (excess) {
     717           0 :                 vma->vm_flags |= VM_ACCOUNT;
     718           0 :                 if (split)
     719           0 :                         vma->vm_next->vm_flags |= VM_ACCOUNT;
     720             :         }
     721             : 
     722             :         return new_addr;
     723             : }
     724             : 
     725           0 : static struct vm_area_struct *vma_to_resize(unsigned long addr,
     726             :         unsigned long old_len, unsigned long new_len, unsigned long flags)
     727             : {
     728           0 :         struct mm_struct *mm = current->mm;
     729             :         struct vm_area_struct *vma;
     730             :         unsigned long pgoff;
     731             : 
     732           0 :         vma = vma_lookup(mm, addr);
     733           0 :         if (!vma)
     734             :                 return ERR_PTR(-EFAULT);
     735             : 
     736             :         /*
     737             :          * !old_len is a special case where an attempt is made to 'duplicate'
     738             :          * a mapping.  This makes no sense for private mappings as it will
     739             :          * instead create a fresh/new mapping unrelated to the original.  This
     740             :          * is contrary to the basic idea of mremap which creates new mappings
     741             :          * based on the original.  There are no known use cases for this
     742             :          * behavior.  As a result, fail such attempts.
     743             :          */
     744           0 :         if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
     745           0 :                 pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n", current->comm, current->pid);
     746             :                 return ERR_PTR(-EINVAL);
     747             :         }
     748             : 
     749           0 :         if ((flags & MREMAP_DONTUNMAP) &&
     750           0 :                         (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
     751             :                 return ERR_PTR(-EINVAL);
     752             : 
     753             :         /* We can't remap across vm area boundaries */
     754           0 :         if (old_len > vma->vm_end - addr)
     755             :                 return ERR_PTR(-EFAULT);
     756             : 
     757           0 :         if (new_len == old_len)
     758             :                 return vma;
     759             : 
     760             :         /* Need to be careful about a growing mapping */
     761           0 :         pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
     762           0 :         pgoff += vma->vm_pgoff;
     763           0 :         if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
     764             :                 return ERR_PTR(-EINVAL);
     765             : 
     766           0 :         if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
     767             :                 return ERR_PTR(-EFAULT);
     768             : 
     769           0 :         if (vma->vm_flags & VM_LOCKED) {
     770             :                 unsigned long locked, lock_limit;
     771           0 :                 locked = mm->locked_vm << PAGE_SHIFT;
     772           0 :                 lock_limit = rlimit(RLIMIT_MEMLOCK);
     773           0 :                 locked += new_len - old_len;
     774           0 :                 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
     775             :                         return ERR_PTR(-EAGAIN);
     776             :         }
     777             : 
     778           0 :         if (!may_expand_vm(mm, vma->vm_flags,
     779           0 :                                 (new_len - old_len) >> PAGE_SHIFT))
     780             :                 return ERR_PTR(-ENOMEM);
     781             : 
     782           0 :         return vma;
     783             : }
     784             : 
     785           0 : static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
     786             :                 unsigned long new_addr, unsigned long new_len, bool *locked,
     787             :                 unsigned long flags, struct vm_userfaultfd_ctx *uf,
     788             :                 struct list_head *uf_unmap_early,
     789             :                 struct list_head *uf_unmap)
     790             : {
     791           0 :         struct mm_struct *mm = current->mm;
     792             :         struct vm_area_struct *vma;
     793           0 :         unsigned long ret = -EINVAL;
     794           0 :         unsigned long map_flags = 0;
     795             : 
     796           0 :         if (offset_in_page(new_addr))
     797             :                 goto out;
     798             : 
     799           0 :         if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
     800             :                 goto out;
     801             : 
     802             :         /* Ensure the old/new locations do not overlap */
     803           0 :         if (addr + old_len > new_addr && new_addr + new_len > addr)
     804             :                 goto out;
     805             : 
     806             :         /*
      807             :          * move_vma() needs us to stay 4 maps below the threshold, otherwise
      808             :          * it will bail out at the very beginning.
      809             :          * That is a problem if we have already unmapped the regions here
      810             :          * (new_addr and old_addr), because userspace will not know the
      811             :          * state of the vmas after it gets -ENOMEM.
      812             :          * So, to avoid such a scenario, we pre-compute whether the whole
      813             :          * operation has a high chance of succeeding map-wise.
      814             :          * The worst case is when both vmas (new_addr and old_addr) get
      815             :          * split in three before being unmapped.
      816             :          * That means 2 more maps (1 for each) on top of the ones we already hold.
      817             :          * Check whether the current map count plus 2 still keeps us 4 maps below
      818             :          * the threshold; otherwise return -ENOMEM here to be safe.
     819             :          */
     820           0 :         if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
     821             :                 return -ENOMEM;
     822             : 
     823           0 :         if (flags & MREMAP_FIXED) {
     824           0 :                 ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
     825           0 :                 if (ret)
     826             :                         goto out;
     827             :         }
     828             : 
     829           0 :         if (old_len >= new_len) {
     830           0 :                 ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
     831           0 :                 if (ret && old_len != new_len)
     832             :                         goto out;
     833             :                 old_len = new_len;
     834             :         }
     835             : 
     836           0 :         vma = vma_to_resize(addr, old_len, new_len, flags);
     837           0 :         if (IS_ERR(vma)) {
     838             :                 ret = PTR_ERR(vma);
     839             :                 goto out;
     840             :         }
     841             : 
     842             :         /* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
     843           0 :         if (flags & MREMAP_DONTUNMAP &&
     844           0 :                 !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
     845             :                 ret = -ENOMEM;
     846             :                 goto out;
     847             :         }
     848             : 
     849           0 :         if (flags & MREMAP_FIXED)
     850           0 :                 map_flags |= MAP_FIXED;
     851             : 
     852           0 :         if (vma->vm_flags & VM_MAYSHARE)
     853           0 :                 map_flags |= MAP_SHARED;
     854             : 
     855           0 :         ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
     856           0 :                                 ((addr - vma->vm_start) >> PAGE_SHIFT),
     857             :                                 map_flags);
     858           0 :         if (IS_ERR_VALUE(ret))
     859             :                 goto out;
     860             : 
     861             :         /* We got a new mapping */
     862           0 :         if (!(flags & MREMAP_FIXED))
     863           0 :                 new_addr = ret;
     864             : 
     865           0 :         ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
     866             :                        uf_unmap);
     867             : 
     868             : out:
     869             :         return ret;
     870             : }
     871             : 
     872           0 : static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
     873             : {
     874           0 :         unsigned long end = vma->vm_end + delta;
     875           0 :         if (end < vma->vm_end) /* overflow */
     876             :                 return 0;
     877           0 :         if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
     878             :                 return 0;
     879           0 :         if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
     880           0 :                               0, MAP_FIXED) & ~PAGE_MASK)
     881             :                 return 0;
     882           0 :         return 1;
     883             : }
     884             : 
     885             : /*
     886             :  * Expand (or shrink) an existing mapping, potentially moving it at the
     887             :  * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
     888             :  *
     889             :  * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
     890             :  * This option implies MREMAP_MAYMOVE.
     891             :  */
     892           0 : SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
     893             :                 unsigned long, new_len, unsigned long, flags,
     894             :                 unsigned long, new_addr)
     895             : {
     896           0 :         struct mm_struct *mm = current->mm;
     897             :         struct vm_area_struct *vma;
     898           0 :         unsigned long ret = -EINVAL;
     899           0 :         bool locked = false;
     900           0 :         bool downgraded = false;
     901             :         struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
     902           0 :         LIST_HEAD(uf_unmap_early);
     903           0 :         LIST_HEAD(uf_unmap);
     904             : 
     905             :         /*
     906             :          * There is a deliberate asymmetry here: we strip the pointer tag
     907             :          * from the old address but leave the new address alone. This is
     908             :          * for consistency with mmap(), where we prevent the creation of
     909             :          * aliasing mappings in userspace by leaving the tag bits of the
     910             :          * mapping address intact. A non-zero tag will cause the subsequent
     911             :          * range checks to reject the address as invalid.
     912             :          *
     913             :          * See Documentation/arm64/tagged-address-abi.rst for more information.
     914             :          */
     915           0 :         addr = untagged_addr(addr);
     916             : 
     917           0 :         if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
     918             :                 return ret;
     919             : 
     920           0 :         if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
     921             :                 return ret;
     922             : 
     923             :         /*
     924             :          * MREMAP_DONTUNMAP is always a move and it does not allow resizing
     925             :          * in the process.
     926             :          */
     927           0 :         if (flags & MREMAP_DONTUNMAP &&
     928           0 :                         (!(flags & MREMAP_MAYMOVE) || old_len != new_len))
     929             :                 return ret;
     930             : 
     931             : 
     932           0 :         if (offset_in_page(addr))
     933             :                 return ret;
     934             : 
     935           0 :         old_len = PAGE_ALIGN(old_len);
     936           0 :         new_len = PAGE_ALIGN(new_len);
     937             : 
     938             :         /*
     939             :          * We allow a zero old-len as a special case
     940             :          * for DOS-emu "duplicate shm area" thing. But
     941             :          * a zero new-len is nonsensical.
     942             :          */
     943           0 :         if (!new_len)
     944             :                 return ret;
     945             : 
     946           0 :         if (mmap_write_lock_killable(current->mm))
     947             :                 return -EINTR;
     948           0 :         vma = vma_lookup(mm, addr);
     949           0 :         if (!vma) {
     950             :                 ret = -EFAULT;
     951             :                 goto out;
     952             :         }
     953             : 
     954             :         if (is_vm_hugetlb_page(vma)) {
     955             :                 struct hstate *h __maybe_unused = hstate_vma(vma);
     956             : 
     957             :                 old_len = ALIGN(old_len, huge_page_size(h));
     958             :                 new_len = ALIGN(new_len, huge_page_size(h));
     959             : 
     960             :                 /* addrs must be huge page aligned */
     961             :                 if (addr & ~huge_page_mask(h))
     962             :                         goto out;
     963             :                 if (new_addr & ~huge_page_mask(h))
     964             :                         goto out;
     965             : 
     966             :                 /*
     967             :                  * Don't allow remap expansion, because the underlying hugetlb
      968             :                  * reservation is not yet capable of handling a split reservation.
     969             :                  */
     970             :                 if (new_len > old_len)
     971             :                         goto out;
     972             :         }
     973             : 
     974           0 :         if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
     975           0 :                 ret = mremap_to(addr, old_len, new_addr, new_len,
     976             :                                 &locked, flags, &uf, &uf_unmap_early,
     977             :                                 &uf_unmap);
     978           0 :                 goto out;
     979             :         }
     980             : 
     981             :         /*
     982             :          * Always allow a shrinking remap: that just unmaps
     983             :          * the unnecessary pages..
     984             :          * __do_munmap does all the needed commit accounting, and
     985             :          * downgrades mmap_lock to read if so directed.
     986             :          */
     987           0 :         if (old_len >= new_len) {
     988             :                 int retval;
     989             : 
     990           0 :                 retval = __do_munmap(mm, addr+new_len, old_len - new_len,
     991             :                                   &uf_unmap, true);
     992           0 :                 if (retval < 0 && old_len != new_len) {
     993           0 :                         ret = retval;
     994           0 :                         goto out;
     995             :                 /* Returning 1 indicates mmap_lock is downgraded to read. */
     996           0 :                 } else if (retval == 1)
     997           0 :                         downgraded = true;
     998             :                 ret = addr;
     999             :                 goto out;
    1000             :         }
    1001             : 
    1002             :         /*
    1003             :          * Ok, we need to grow..
    1004             :          */
    1005           0 :         vma = vma_to_resize(addr, old_len, new_len, flags);
    1006           0 :         if (IS_ERR(vma)) {
    1007             :                 ret = PTR_ERR(vma);
    1008             :                 goto out;
    1009             :         }
    1010             : 
    1011             :         /* old_len exactly to the end of the area..
    1012             :          */
    1013           0 :         if (old_len == vma->vm_end - addr) {
    1014             :                 /* can we just expand the current mapping? */
    1015           0 :                 if (vma_expandable(vma, new_len - old_len)) {
    1016           0 :                         long pages = (new_len - old_len) >> PAGE_SHIFT;
    1017             : 
    1018           0 :                         if (vma->vm_flags & VM_ACCOUNT) {
    1019           0 :                                 if (security_vm_enough_memory_mm(mm, pages)) {
    1020             :                                         ret = -ENOMEM;
    1021             :                                         goto out;
    1022             :                                 }
    1023             :                         }
    1024             : 
    1025           0 :                         if (vma_adjust(vma, vma->vm_start, addr + new_len,
    1026             :                                        vma->vm_pgoff, NULL)) {
    1027           0 :                                 vm_unacct_memory(pages);
    1028           0 :                                 ret = -ENOMEM;
    1029           0 :                                 goto out;
    1030             :                         }
    1031             : 
    1032           0 :                         vm_stat_account(mm, vma->vm_flags, pages);
    1033           0 :                         if (vma->vm_flags & VM_LOCKED) {
    1034           0 :                                 mm->locked_vm += pages;
    1035           0 :                                 locked = true;
    1036           0 :                                 new_addr = addr;
    1037             :                         }
    1038             :                         ret = addr;
    1039             :                         goto out;
    1040             :                 }
    1041             :         }
    1042             : 
    1043             :         /*
     1044             :          * We weren't able to just expand or shrink the area;
     1045             :          * we need to create a new one and move it.
    1046             :          */
    1047           0 :         ret = -ENOMEM;
    1048           0 :         if (flags & MREMAP_MAYMOVE) {
    1049           0 :                 unsigned long map_flags = 0;
    1050           0 :                 if (vma->vm_flags & VM_MAYSHARE)
    1051           0 :                         map_flags |= MAP_SHARED;
    1052             : 
    1053           0 :                 new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
    1054           0 :                                         vma->vm_pgoff +
    1055           0 :                                         ((addr - vma->vm_start) >> PAGE_SHIFT),
    1056             :                                         map_flags);
    1057           0 :                 if (IS_ERR_VALUE(new_addr)) {
    1058             :                         ret = new_addr;
    1059             :                         goto out;
    1060             :                 }
    1061             : 
    1062           0 :                 ret = move_vma(vma, addr, old_len, new_len, new_addr,
    1063             :                                &locked, flags, &uf, &uf_unmap);
    1064             :         }
    1065             : out:
    1066           0 :         if (offset_in_page(ret))
    1067           0 :                 locked = false;
    1068           0 :         if (downgraded)
    1069           0 :                 mmap_read_unlock(current->mm);
    1070             :         else
    1071           0 :                 mmap_write_unlock(current->mm);
    1072           0 :         if (locked && new_len > old_len)
    1073           0 :                 mm_populate(new_addr + old_len, new_len - old_len);
    1074           0 :         userfaultfd_unmap_complete(mm, &uf_unmap_early);
    1075           0 :         mremap_userfaultfd_complete(&uf, addr, ret, old_len);
    1076           0 :         userfaultfd_unmap_complete(mm, &uf_unmap);
    1077           0 :         return ret;
    1078             : }
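
For reference, a minimal userspace sketch of the syscall implemented above, using the glibc wrapper; MREMAP_MAYMOVE lets the kernel pick a new address when in-place expansion fails, which is the move_vma() path. MREMAP_DONTUNMAP (Linux >= 5.7) would additionally keep the old range mapped, but it is not exercised here:

    /* Sketch: grow an anonymous mapping with mremap(2), allowing it to move. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t old_len = 1UL << 20, new_len = 4UL << 20;
            char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;
            memset(p, 0xaa, old_len);

            /* May expand in place or move; the old contents follow the mapping. */
            char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
            if (q == MAP_FAILED)
                    return 1;

            printf("moved=%d first byte still %#x\n", q != p, q[0] & 0xff);
            munmap(q, new_len);
            return 0;
    }
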

Generated by: LCOV version 1.14