// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

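/*
 * Tear down the walk state (drop the PTE map and lock, if any) and report
 * "not mapped" to the caller.
 */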
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

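/*
 * Map the PTE for pvmw->address and take its lock.  Unless PVMW_SYNC is set,
 * peek at the unlocked PTE first and return false without locking when the
 * entry cannot possibly be the one we are looking for.
 */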
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU accessible and thus is mapped as a
			 * special swap entry; nonetheless it still counts as a
			 * valid regular mapping of the page (and is accounted
			 * for as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry) &&
				    !is_device_exclusive_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

/**
 * check_pte - check if the page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, carrying the pte and the pfn range to check
 *
 * page_vma_mapped_walk() found a place where the page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to the page, or to any subpage in case of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * the page, or to any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

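	/*
	 * The mapped pfn is in the target range iff it lies in
	 * [pvmw->pfn, pvmw->pfn + pvmw->nr_pages); with unsigned arithmetic
	 * a pfn below pvmw->pfn wraps around and also fails the check.
	 */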
	return (pfn - pvmw->pfn) < pvmw->nr_pages;
}

/* Returns true if the two ranges overlap. Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}

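/*
 * Advance pvmw->address to the next 'size'-aligned boundary above it.  If
 * that wraps past the end of the address space, saturate to ULONG_MAX so
 * the caller's 'address < end' loop still terminates.
 */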
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
 * and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually a THP). For a PTE-mapped THP, you should run page_vma_mapped_walk()
 * in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(hstate, mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (likely(pmd_trans_huge(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    transparent_hugepage_active(vma) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
				spin_lock(pvmw->ptl);
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}

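/*
 * Illustrative sketch of a typical caller (for reference only; the real
 * users live in mm/rmap.c and should be consulted for the authoritative
 * pattern).  For a single-pfn page, mirroring page_mapped_in_vma() below,
 * initialise the walk state and loop until the walker runs out of
 * mappings, acting on each entry while pvmw.ptl is held:
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.pfn = page_to_pfn(page),
 *		.nr_pages = 1,
 *		.vma = vma,
 *		.address = vma_address(page, vma),
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (!pvmw.pte) {
 *			... handle a PMD-mapped THP via pvmw.pmd ...
 *			continue;
 *		}
 *		... inspect or modify the PTE at pvmw.pte ...
 *	}
 */
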
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}