/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * A HugeTLB page carries more metadata than fits in its head struct page,
 * so we have to reuse fields of the tail struct pages to store it. To avoid
 * conflicts with subsequent users of more tail struct pages, the discrete
 * indexes of the tail struct pages used are gathered here.
 */
enum {
        SUBPAGE_INDEX_SUBPOOL = 1,      /* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
        SUBPAGE_INDEX_CGROUP,           /* reuse page->private */
        SUBPAGE_INDEX_CGROUP_RSVD,      /* reuse page->private */
        __MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
        __NR_USED_SUBPAGE,
};
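
/*
 * Illustrative sketch (not part of the API): metadata at these indexes is
 * reached by offsetting from the head page and reusing that tail struct
 * page's page->private field, e.g.
 *
 *      spool = (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
 *
 * See hugetlb_page_subpool() later in this file for the real accessor.
 */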

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes */
                                /* both allocated and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to */
                                /* satisfy minimum size. */
};

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On private mappings, the counter to uncharge reservations is stored
         * here. If these fields are 0, then either the mapping is shared, or
         * cgroup accounting is disabled for this resv_map.
         */
        struct page_counter *reservation_counter;
        unsigned long pages_per_hpage;
        struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by the resv_map's lock. The set of regions within the resv_map represents
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region, and to is the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. Because to is the first index past the end,
 * region sizes are computed as 4(to) - 0(from) = 4 huge pages.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On shared mappings, each reserved region appears as a struct
         * file_region in resv_map. These fields hold the info needed to
         * uncharge each reservation.
         */
        struct page_counter *reservation_counter;
        struct cgroup_subsys_state *css;
#endif
};
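
/*
 * Illustrative sketch only (the real region code lives in mm/hugetlb.c):
 * regions are linked on resv_map->regions via file_region->link, so the
 * total number of huge pages covered could be computed as
 *
 *      struct file_region *rg;
 *      long nr = 0;
 *
 *      spin_lock(&resv->lock);
 *      list_for_each_entry(rg, &resv->regions, link)
 *              nr += rg->to - rg->from;
 *      spin_unlock(&resv->lock);
 *
 * where "resv" is an assumed pointer to the resv_map being examined.
 */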

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
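
/*
 * Example usage of for_each_hstate() (illustrative sketch, assuming the
 * hstates[] array has been populated at boot):
 *
 *      struct hstate *h;
 *
 *      for_each_hstate(h)
 *              pr_info("hstate %s: %lu free\n", h->name, h->free_huge_pages);
 */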

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);

int move_hugetlb_page_tables(struct vm_area_struct *vma,
                             struct vm_area_struct *new_vma,
                             unsigned long old_addr, unsigned long new_addr,
                             unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int,
                         int *);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                                struct vm_area_struct *dst_vma,
                                unsigned long dst_addr,
                                unsigned long src_addr,
                                enum mcopy_atomic_mode mode,
                                struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                struct vm_area_struct *vma,
                                vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
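
/*
 * Illustrative sketch of the fault serialisation pattern (the actual callers
 * live in mm/hugetlb.c and fs/hugetlbfs/inode.c; "mapping" and "idx" are
 * assumed to identify the huge page index being operated on):
 *
 *      u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *      mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *      ... fault or truncate work on (mapping, idx) ...
 *      mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */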

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
                                pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
                                                        struct page *hpage)
{
        return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long *addr, pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
                        struct vm_area_struct *vma, struct page **pages,
                        struct vm_area_struct **vmas, unsigned long *position,
                        unsigned long *nr_pages, long i, unsigned int flags,
                        int *nonblocking)
{
        BUG();
        return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
                                        unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
                        struct mm_struct *src, struct vm_area_struct *vma)
{
        BUG();
        return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
                                           struct vm_area_struct *new_vma,
                                           unsigned long old_addr,
                                           unsigned long new_addr,
                                           unsigned long len)
{
        BUG();
        return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
        return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
                                unsigned long address, hugepd_t hpd, int flags,
                                int pdshift)
{
        return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
                                unsigned long address, pmd_t *pmd, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
                                unsigned long address, pud_t *pud, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
                                unsigned long address, pgd_t *pgd, int flags)
{
        return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
                                unsigned long addr, unsigned long len)
{
        return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
        return 0;
}

static inline int pud_huge(pud_t pud)
{
        return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
                                unsigned long addr, unsigned long len)
{
        return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                                                pte_t *dst_pte,
                                                struct vm_area_struct *dst_vma,
                                                unsigned long dst_addr,
                                                unsigned long src_addr,
                                                enum mcopy_atomic_mode mode,
                                                struct page **pagep)
{
        BUG();
        return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                                        unsigned long sz)
{
        return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
        return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
        return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
                                        struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        BUG();
        return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * Hugepages at the page global directory level. Arches that support
 * hugepages at the pgd level need to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif
#ifndef p4d_huge
#define p4d_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as a shm file, so shmfs accounting rules
         * apply.
         */
        HUGETLB_SHMFS_INODE = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply.
         */
        HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long max_inodes;        /* inodes allowed */
        long free_inodes;       /* inodes free */
        spinlock_t stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
        kuid_t uid;
        kgid_t gid;
        umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
        unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file) false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                int creat_flags, int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * Hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *      allocation time. Cleared when page is fully instantiated. Free
 *      routine checks flag to restore a reservation on error paths.
 *      Synchronization: Examined or modified by code that knows it has
 *      the only reference to the page, i.e. after allocation but before
 *      use, or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *      cache and/or page tables. Indicates the page is a candidate for
 *      migration.
 *      Synchronization: Initially set after new page allocation with no
 *      locking. When examined and modified during migration processing
 *      (isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *      allocator. Typically used for migration target pages when no pages
 *      are available in the pool. The hugetlb free page path will
 *      immediately free pages with this flag set back to the buddy allocator.
 *      Synchronization: Can be set after huge page allocation from buddy when
 *      code knows it has the only reference. All other examinations and
 *      modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *      Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 */
enum hugetlb_page_flags {
        HPG_restore_reserve = 0,
        HPG_migratable,
        HPG_temporary,
        HPG_freed,
        HPG_vmemmap_optimized,
        __NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)                            \
static inline int HPage##uname(struct page *page)               \
        { return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void SetHPage##uname(struct page *page)           \
        { set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void ClearHPage##uname(struct page *page)         \
        { clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)                            \
static inline int HPage##uname(struct page *page)               \
        { return 0; }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void SetHPage##uname(struct page *page)           \
        { }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void ClearHPage##uname(struct page *page)         \
        { }
#endif

#define HPAGEFLAG(uname, flname)                                \
        TESTHPAGEFLAG(uname, flname)                            \
        SETHPAGEFLAG(uname, flname)                             \
        CLEARHPAGEFLAG(uname, flname)                           \

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
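
/*
 * For example, HPAGEFLAG(Freed, freed) above generates (under
 * CONFIG_HUGETLB_PAGE) three helpers operating on bit HPG_freed of the
 * head page's page->private:
 *
 *      static inline int HPageFreed(struct page *page);
 *      static inline void SetHPageFreed(struct page *page);
 *      static inline void ClearHPageFreed(struct page *page);
 */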

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        struct mutex resize_lock;
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned int demote_order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int max_huge_pages_node[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
        unsigned int nr_free_vmemmap_pages;
#endif
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files_dfl[8];
        struct cftype cgroup_files_legacy[10];
#endif
        char name[HSTATE_NAME_LEN];
};
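
/*
 * Sketch of the geometry invariants (assuming the usual initialisation
 * performed when an hstate is registered; see huge_page_size() and
 * huge_page_mask() below):
 *
 *      huge_page_size(h) == PAGE_SIZE << h->order
 *      h->mask           == ~(huge_page_size(h) - 1)
 *
 * e.g. a 2MB hstate with 4KB base pages has order 9.
 */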

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                                nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                        pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
        return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
                                        struct hugepage_subpool *subpool)
{
        set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}
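
/*
 * Example: a page_size_log of 21 selects the 2MB hstate (1UL << 21),
 * which is how mmap()'s MAP_HUGE_2MB encoding ends up here, while 0
 * falls back to the default hstate. (Illustrative note; the caller
 * passes page_size_log down via hugetlb_file_setup().)
 */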

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}
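
/*
 * Worked example for a 2MB huge page with 4KB base pages:
 * huge_page_order() == 9, huge_page_size() == 4096 << 9 == 2MB,
 * pages_per_huge_page() == 1 << 9 == 512, and blocks_per_huge_page()
 * == 2MB / 512 == 4096 (512-byte sectors).
 */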

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                unsigned long addr, unsigned long len)
{
        return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
                                       vm_flags_t flags)
{
        return pte_mkhuge(entry);
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        if ((huge_page_shift(h) == PMD_SHIFT) ||
                (huge_page_shift(h) == PUD_SHIFT) ||
                        (huge_page_shift(h) == PGDIR_SHIFT))
                return true;
        else
                return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * a movable zone. Movability only makes sense for a huge page
 * whose size is supported for migration: there is no reason for
 * a huge page to be movable if it is not migratable to start
 * with. The huge page must also be small enough that migrating
 * it out of a movable zone remains feasible; mere presence in a
 * movable zone does not make migration feasible.
 *
 * So even though large huge page sizes such as the gigantic ones
 * are migratable, they should not be movable, because it is not
 * feasible to migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
        if (!hugepage_migration_supported(h))
                return false;

        if (hstate_is_gigantic(h))
                return false;
        return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        if (hugepage_movable_supported(h))
                return GFP_HIGHUSER_MOVABLE;
        else
                return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        gfp_t modified_mask = htlb_alloc_mask(h);

        /* Some callers might want to enforce a specific node. */
        modified_mask |= (gfp_mask & __GFP_THISNODE);

        modified_mask |= (gfp_mask & __GFP_NOWARN);

        return modified_mask;
}
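
/*
 * Illustrative use: a caller that must allocate on one node can pass
 * __GFP_THISNODE through, e.g.
 *
 *      gfp_t gfp_mask = htlb_modify_alloc_mask(h, __GFP_THISNODE);
 *
 * which yields htlb_alloc_mask(h) with __GFP_THISNODE preserved.
 */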

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
        atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
        set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
{
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                pte_t old_pte, pte_t pte)
{
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
        return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
                                                struct list_head *list)
{
        return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
                                           unsigned long addr,
                                           int avoid_reserve)
{
        return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                        nodemask_t *nmask, gfp_t gfp_mask)
{
        return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
                                               struct vm_area_struct *vma,
                                               unsigned long address)
{
        return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
        return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
        return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
        return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
        return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return 0;
}

static inline int hstate_index(struct hstate *h)
{
        return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
        return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
                                           unsigned long end_pfn)
{
        return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
        return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}
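
/*
 * Typical usage (illustrative sketch only): look up the pte, take the
 * lock that covers it, and drop it when done:
 *
 *      ptep = huge_pte_offset(mm, addr, huge_page_size(h));
 *      if (ptep) {
 *              spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *              ... examine or modify *ptep ...
 *              spin_unlock(ptl);
 *      }
 */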

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */