/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>

struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
 * that moves the range.
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this, such as
 * madvise() or replacing a page with another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
 * range, i.e. using the vma access permission (vm_page_prot) to update the
 * whole range is enough; there is no need to inspect changes to the CPU
 * page table (mprotect() syscall).
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
 * flag for pages in the range, so to mirror those changes the user must
 * inspect the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still the same page and the
 * same access flags). The user should soft dirty the page in the end callback
 * to make sure that anyone relying on soft dirtiness catches pages that might
 * be written through non-CPU mappings.
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 * that the mm refcount is zero and the range is no longer accessible.
 *
 * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
 * a device driver that it may ignore the invalidation if the
 * owner field matches the driver's device private pgmap owner.
 *
 * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
 * longer have exclusive access to the page. When sent during creation of an
 * exclusive range the owner will be initialised to the value provided by the
 * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
	MMU_NOTIFY_RELEASE,
	MMU_NOTIFY_MIGRATE,
	MMU_NOTIFY_EXCLUSIVE,
};
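
/*
 * Example (illustrative sketch, not part of this API; "my_dev" and its
 * pgmap_owner field are hypothetical driver names): an invalidate callback
 * can use the event type and owner to skip invalidations the driver
 * triggered itself, e.g. during its own migration:
 *
 *	static bool my_dev_invalidate(struct mmu_interval_notifier *sub,
 *				      const struct mmu_notifier_range *range,
 *				      unsigned long cur_seq)
 *	{
 *		struct my_dev *dev = container_of(sub, struct my_dev, notifier);
 *
 *		if (range->event == MMU_NOTIFY_MIGRATE &&
 *		    range->owner == dev->pgmap_owner)
 *			return true;	// our own migrate, nothing to do
 *		...
 *	}
 */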

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model. So if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, which would lead to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *subscription,
			struct mm_struct *mm);
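
	/*
	 * Example (illustrative sketch; "my_dev" and its helpers are
	 * hypothetical): a typical ->release() freezes the device and tears
	 * down every secondary mapping so no further device access through
	 * this mm is possible:
	 *
	 *	static void my_dev_release(struct mmu_notifier *sub,
	 *				   struct mm_struct *mm)
	 *	{
	 *		struct my_dev *dev = container_of(sub, struct my_dev,
	 *						  notifier);
	 *
	 *		my_dev_stop_dma(dev);	// freeze the secondary MMU
	 *		my_dev_unmap_all(dev);	// drop all device mappings
	 *	}
	 */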

	/*
	 * clear_flush_young is called after the VM test-and-clears the
	 * young/accessed bitflag in the pte. This way the VM provides
	 * proper aging for accesses to the page through the secondary
	 * MMUs, not only for the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);
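
	/*
	 * Example (illustrative sketch, hypothetical helpers): a driver's
	 * clear_flush_young() typically test-and-clears the accessed bits
	 * in its own page tables and flushes the device TLB for
	 * [start, end):
	 *
	 *	young = my_dev_test_and_clear_young(dev, start, end);
	 *	if (young)
	 *		my_dev_flush_tlb(dev, start, end);
	 *	return young;
	 */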

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *subscription,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called in cases where the pte mapping a page is
	 * changed: for example, when ksm remaps the pte to point to a new
	 * shared page.
	 */
	void (*change_pte)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_lock and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start()/end() for the whole duration of the
	 * invalidate_range_start()/end() critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount, but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If the range is not blockable (mmu_notifier_range_blockable()
	 * returns false) then the callback cannot sleep and has to return
	 * -EAGAIN if sleeping would be required; 0 should be returned
	 * otherwise. Please note that notifiers that can fail
	 * invalidate_range_start are not allowed to implement
	 * invalidate_range_end, as there is no mechanism for informing the
	 * notifier that its start failed.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range);
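
	/*
	 * Example (illustrative sketch, hypothetical driver names): an
	 * invalidate_range_start() implementation that needs a sleeping
	 * lock must honour the blockable flag:
	 *
	 *	static int my_dev_range_start(struct mmu_notifier *sub,
	 *				const struct mmu_notifier_range *range)
	 *	{
	 *		struct my_dev *dev = container_of(sub, struct my_dev,
	 *						  notifier);
	 *
	 *		if (!mmu_notifier_range_blockable(range))
	 *			return -EAGAIN;	// caller may retry blockable
	 *
	 *		mutex_lock(&dev->lock);
	 *		my_dev_unmap(dev, range->start, range->end);
	 *		mutex_unlock(&dev->lock);
	 *		return 0;
	 *	}
	 *
	 * Per the comment above, a notifier whose start can fail must not
	 * implement invalidate_range_end().
	 */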

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For more in depth
	 * discussion on this see Documentation/vm/mmu_notifier.rst
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * These callbacks are used with the get/put interface to manage the
	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
	 * notifier for use with the mm.
	 *
	 * free_notifier() is only called after the mmu_notifier has been
	 * fully put, calls to any ops callback are prevented and no ops
	 * callbacks are currently running. It is called from a SRCU callback
	 * and cannot sleep.
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *subscription);
};
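
/*
 * Example (illustrative sketch, hypothetical driver names): with the get/put
 * interface the notifier memory is managed through alloc_notifier() and
 * free_notifier(), and users attach with mmu_notifier_get():
 *
 *	static const struct mmu_notifier_ops my_dev_mn_ops = {
 *		.release	= my_dev_release,
 *		.alloc_notifier	= my_dev_alloc_notifier,
 *		.free_notifier	= my_dev_free_notifier,
 *	};
 *
 *	sub = mmu_notifier_get(&my_dev_mn_ops, current->mm);
 *	if (IS_ERR(sub))
 *		return PTR_ERR(sub);
 *	...
 *	mmu_notifier_put(sub);	// free_notifier() runs once fully put
 */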

/*
 * The notifier chains are protected by mmap_lock and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_lock are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_lock is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release).
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
	struct mm_struct *mm;
	struct rcu_head rcu;
	unsigned int users;
};

/**
 * struct mmu_interval_notifier_ops
 * @invalidate: Upon return the caller must stop using any SPTEs within this
 *              range. This function can sleep. Return false only if sleeping
 *              was required but mmu_notifier_range_blockable(range) is false.
 */
struct mmu_interval_notifier_ops {
	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
};
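
/*
 * Example (illustrative sketch, hypothetical driver names): an interval
 * notifier invalidate() callback typically takes the driver lock that is
 * also held around mmu_interval_read_retry(), records the new sequence with
 * mmu_interval_set_seq() and tears down the affected device mappings:
 *
 *	static bool my_dev_invalidate(struct mmu_interval_notifier *sub,
 *				      const struct mmu_notifier_range *range,
 *				      unsigned long cur_seq)
 *	{
 *		struct my_dev *dev = container_of(sub, struct my_dev, notifier);
 *
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&dev->lock);
 *		else if (!mutex_trylock(&dev->lock))
 *			return false;	// only allowed when not blockable
 *		mmu_interval_set_seq(sub, cur_seq);
 *		my_dev_unmap(dev, range->start, range->end);
 *		mutex_unlock(&dev->lock);
 *		return true;
 *	}
 */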

struct mmu_interval_notifier {
	struct interval_tree_node interval_tree;
	const struct mmu_interval_notifier_ops *ops;
	struct mm_struct *mm;
	struct hlist_node deferred_item;
	unsigned long invalidate_seq;
};

#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

struct mmu_notifier_range {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
	enum mmu_notifier_event event;
	void *owner;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->notifier_subscriptions);
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *ret;

	mmap_write_lock(mm);
	ret = mmu_notifier_get_locked(ops, mm);
	mmap_write_unlock(mm);
	return ret;
}
void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);

extern int mmu_notifier_register(struct mmu_notifier *subscription,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *subscription,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
				    struct mm_struct *mm);

unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);

/**
 * mmu_interval_set_seq - Save the invalidation sequence
 * @interval_sub: The subscription passed to invalidate
 * @cur_seq: The cur_seq passed to the invalidate() callback
 *
 * This must be called unconditionally from the invalidate callback of a
 * struct mmu_interval_notifier_ops under the same lock that is used to call
 * mmu_interval_read_retry(). It updates the sequence number for later use by
 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
 *
 * If the caller does not call mmu_interval_read_begin() or
 * mmu_interval_read_retry() then this call is not required.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
		     unsigned long cur_seq)
{
	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}

/**
 * mmu_interval_read_retry - End a read side critical section against a VA range
 * @interval_sub: The subscription
 * @seq: The return of the paired mmu_interval_read_begin()
 *
 * This MUST be called under a user provided lock that is also held
 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
 *
 * Each call should be paired with a single mmu_interval_read_begin() and
 * should be used to conclude the read side.
 *
 * Returns true if an invalidation collided with this critical section, and
 * the caller should retry.
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
			unsigned long seq)
{
	return interval_sub->invalidate_seq != seq;
}

/**
 * mmu_interval_check_retry - Test if a collision has occurred
 * @interval_sub: The subscription
 * @seq: The return of the matching mmu_interval_read_begin()
 *
 * This can be used in the critical section between mmu_interval_read_begin()
 * and mmu_interval_read_retry(). A return of true indicates an invalidation
 * has collided with this critical region and a future
 * mmu_interval_read_retry() will return true.
 *
 * False is not reliable and only suggests a collision may not have
 * occurred. It can be called many times and does not have to hold the user
 * provided lock.
 *
 * This call can be used as part of loops and other expensive operations to
 * expedite a retry.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
			 unsigned long seq)
{
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
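
/*
 * Example (illustrative sketch, hypothetical driver names): the canonical
 * read side pairs mmu_interval_read_begin() with mmu_interval_read_retry()
 * under the same driver lock that the invalidate() callback takes before
 * calling mmu_interval_set_seq(). Long-running work in the middle may poll
 * mmu_interval_check_retry() to bail out early on a collision:
 *
 *	unsigned long seq;
 *
 *again:
 *	seq = mmu_interval_read_begin(&dev->notifier);
 *	my_dev_build_sptes(dev, range);	// may sleep, may fault
 *
 *	mutex_lock(&dev->lock);
 *	if (mmu_interval_read_retry(&dev->notifier, seq)) {
 *		mutex_unlock(&dev->lock);
 *		goto again;	// an invalidation raced with us
 *	}
 *	my_dev_commit_sptes(dev);	// publish under the lock
 *	mutex_unlock(&dev->lock);
 */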

extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
						bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	mm->notifier_subscriptions = NULL;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_subscriptions_destroy(mm);
}

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   enum mmu_notifier_event event,
					   unsigned flags,
					   struct vm_area_struct *vma,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->vma = vma;
	range->event = event;
	range->mm = mm;
	range->start = start;
	range->end = end;
	range->flags = flags;
}

static inline void mmu_notifier_range_init_owner(
			struct mmu_notifier_range *range,
			enum mmu_notifier_event event, unsigned int flags,
			struct vm_area_struct *vma, struct mm_struct *mm,
			unsigned long start, unsigned long end, void *owner)
{
	mmu_notifier_range_init(range, event, flags, vma, mm, start, end);
	range->owner = owner;
}
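
/*
 * Example (illustrative sketch): the usual pattern in the core VM is to
 * initialise a range on the stack and bracket the page table update with
 * the start/end notifiers:
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma,
 *				vma->vm_mm, start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	// ... clear the CPU page table entries for [start, end) ...
 *	mmu_notifier_invalidate_range_end(&range);
 */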

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * It is safe to start by updating the secondary MMUs, because the primary MMU
 * pte invalidate must have already happened with a ptep_clear_flush() before
 * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is
 * required when we change both the protection of the mapping from read-only to
 * read-write and the pfn (like during copy-on-write page faults). Otherwise the
 * old page would remain mapped readonly in the secondary MMUs after the new
 * page is already writable by some CPU through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
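
/*
 * Example (illustrative sketch, details elided): the copy-on-write ordering
 * described above looks roughly like this in a fault handler:
 *
 *	// Invalidate the primary MMU first; the _notify variant also
 *	// flushes the secondary TLBs for this address.
 *	orig_pte = ptep_clear_flush_notify(vma, address, ptep);
 *	// Point the secondary MMUs at the new page before the primary MMU
 *	// can make it writable.
 *	set_pte_at_notify(mm, address, ptep,
 *			  mk_pte(new_page, vma->vm_page_prot));
 */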

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)	\
	_mmu_notifier_range_init(range, start, end)
#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \
				      end, owner)			\
	_mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */