LCOV - code coverage report
Current view: top level - include/linux - vmalloc.h (source / functions) Hit Total Coverage
Test: coverage.info Lines: 2 2 100.0 %
Date: 2022-12-09 01:23:36 Functions: 0 0 -

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef _LINUX_VMALLOC_H
       3             : #define _LINUX_VMALLOC_H
       4             : 
       5             : #include <linux/spinlock.h>
       6             : #include <linux/init.h>
       7             : #include <linux/list.h>
       8             : #include <linux/llist.h>
       9             : #include <asm/page.h>             /* pgprot_t */
      10             : #include <linux/rbtree.h>
      11             : #include <linux/overflow.h>
      12             : 
      13             : #include <asm/vmalloc.h>
      14             : 
      15             : struct vm_area_struct;          /* vma defining user mapping in mm_types.h */
      16             : struct notifier_block;          /* in notifier.h */
      17             : 
      18             : /* bits in flags of vmalloc's vm_struct below */
      19             : #define VM_IOREMAP              0x00000001      /* ioremap() and friends */
      20             : #define VM_ALLOC                0x00000002      /* vmalloc() */
      21             : #define VM_MAP                  0x00000004      /* vmap()ed pages */
      22             : #define VM_USERMAP              0x00000008      /* suitable for remap_vmalloc_range */
      23             : #define VM_DMA_COHERENT         0x00000010      /* dma_alloc_coherent */
      24             : #define VM_UNINITIALIZED        0x00000020      /* vm_struct is not fully initialized */
      25             : #define VM_NO_GUARD             0x00000040      /* ***DANGEROUS*** don't add guard page */
      26             : #define VM_KASAN                0x00000080      /* has allocated kasan shadow memory */
      27             : #define VM_FLUSH_RESET_PERMS    0x00000100      /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
      28             : #define VM_MAP_PUT_PAGES        0x00000200      /* put pages and free array in vfree */
      29             : #define VM_ALLOW_HUGE_VMAP      0x00000400      /* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */
      30             : 
      31             : #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
      32             :         !defined(CONFIG_KASAN_VMALLOC)
      33             : #define VM_DEFER_KMEMLEAK       0x00000800      /* defer kmemleak object creation */
      34             : #else
      35             : #define VM_DEFER_KMEMLEAK       0
      36             : #endif
      37             : 
      38             : /* bits [20..32] reserved for arch specific ioremap internals */
      39             : 
      40             : /*
      41             :  * Maximum alignment for ioremap() regions.
      42             :  * Can be overridden by arch-specific value.
      43             :  */
      44             : #ifndef IOREMAP_MAX_ORDER
      45             : #define IOREMAP_MAX_ORDER       (7 + PAGE_SHIFT)        /* 128 pages */
      46             : #endif
      47             : 
      48             : struct vm_struct {
      49             :         struct vm_struct        *next;
      50             :         void                    *addr;
      51             :         unsigned long           size;
      52             :         unsigned long           flags;
      53             :         struct page             **pages;
      54             : #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
      55             :         unsigned int            page_order;
      56             : #endif
      57             :         unsigned int            nr_pages;
      58             :         phys_addr_t             phys_addr;
      59             :         const void              *caller;
      60             : };
      61             : 
      62             : struct vmap_area {
      63             :         unsigned long va_start;
      64             :         unsigned long va_end;
      65             : 
      66             :         struct rb_node rb_node;         /* address sorted rbtree */
      67             :         struct list_head list;          /* address sorted list */
      68             : 
      69             :         /*
      70             :          * The following two variables can be packed, because
      71             :          * a vmap_area object can be either:
      72             :          *    1) in "free" tree (root is free_vmap_area_root)
      73             :          *    2) or "busy" tree (root is vmap_area_root)
      74             :          */
      75             :         union {
      76             :                 unsigned long subtree_max_size; /* in "free" tree */
      77             :                 struct vm_struct *vm;           /* in "busy" tree */
      78             :         };
      79             : };
      80             : 
      81             : /* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
      82             : #ifndef arch_vmap_p4d_supported
      83             : static inline bool arch_vmap_p4d_supported(pgprot_t prot)
      84             : {
      85             :         return false;
      86             : }
      87             : #endif
      88             : 
      89             : #ifndef arch_vmap_pud_supported
      90             : static inline bool arch_vmap_pud_supported(pgprot_t prot)
      91             : {
      92             :         return false;
      93             : }
      94             : #endif
      95             : 
      96             : #ifndef arch_vmap_pmd_supported
      97             : static inline bool arch_vmap_pmd_supported(pgprot_t prot)
      98             : {
      99             :         return false;
     100             : }
     101             : #endif
     102             : 
     103             : #ifndef arch_vmap_pte_range_map_size
     104             : static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
     105             :                                                          u64 pfn, unsigned int max_page_shift)
     106             : {
     107             :         return PAGE_SIZE;
     108             : }
     109             : #endif
     110             : 
     111             : #ifndef arch_vmap_pte_supported_shift
     112             : static inline int arch_vmap_pte_supported_shift(unsigned long size)
     113             : {
     114             :         return PAGE_SHIFT;
     115             : }
     116             : #endif
     117             : 
     118             : #ifndef arch_vmap_pgprot_tagged
     119             : static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
     120             : {
     121             :         return prot;
     122             : }
     123             : #endif
     124             : 
     125             : /*
     126             :  *      Highlevel APIs for driver use
     127             :  */
     128             : extern void vm_unmap_ram(const void *mem, unsigned int count);
     129             : extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
     130             : extern void vm_unmap_aliases(void);
     131             : 
     132             : #ifdef CONFIG_MMU
     133             : extern void __init vmalloc_init(void);
     134             : extern unsigned long vmalloc_nr_pages(void);
     135             : #else
     136             : static inline void vmalloc_init(void)
     137             : {
     138             : }
     139             : static inline unsigned long vmalloc_nr_pages(void) { return 0; }
     140             : #endif
     141             : 
     142             : extern void *vmalloc(unsigned long size) __alloc_size(1);
     143             : extern void *vzalloc(unsigned long size) __alloc_size(1);
     144             : extern void *vmalloc_user(unsigned long size) __alloc_size(1);
     145             : extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
     146             : extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
     147             : extern void *vmalloc_32(unsigned long size) __alloc_size(1);
     148             : extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
     149             : extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
     150             : extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
     151             :                         unsigned long start, unsigned long end, gfp_t gfp_mask,
     152             :                         pgprot_t prot, unsigned long vm_flags, int node,
     153             :                         const void *caller) __alloc_size(1);
     154             : void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
     155             :                 int node, const void *caller) __alloc_size(1);
     156             : void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
     157             : 
     158             : extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
     159             : extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
     160             : extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
     161             : extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);
     162             : 
     163             : extern void vfree(const void *addr);
     164             : extern void vfree_atomic(const void *addr);
     165             : 
     166             : extern void *vmap(struct page **pages, unsigned int count,
     167             :                         unsigned long flags, pgprot_t prot);
     168             : void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
     169             : extern void vunmap(const void *addr);
     170             : 
     171             : extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
     172             :                                        unsigned long uaddr, void *kaddr,
     173             :                                        unsigned long pgoff, unsigned long size);
     174             : 
     175             : extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
     176             :                                                         unsigned long pgoff);
     177             : 
     178             : /*
     179             :  * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
     180             :  * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
     181             :  * needs to be called.
     182             :  */
     183             : #ifndef ARCH_PAGE_TABLE_SYNC_MASK
     184             : #define ARCH_PAGE_TABLE_SYNC_MASK 0
     185             : #endif
     186             : 
     187             : /*
     188             :  * There is no default implementation for arch_sync_kernel_mappings(). It is
     189             :  * relied upon the compiler to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK
     190             :  * is 0.
     191             :  */
     192             : void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
     193             : 
     194             : /*
     195             :  *      Lowlevel-APIs (not for driver use!)
     196             :  */
     197             : 
     198             : static inline size_t get_vm_area_size(const struct vm_struct *area)
     199             : {
     200          15 :         if (!(area->flags & VM_NO_GUARD))
     201             :                 /* return actual size without guard page */
     202          15 :                 return area->size - PAGE_SIZE;
     203             :         else
     204             :                 return area->size;
     205             : 
     206             : }
     207             : 
     208             : extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
     209             : extern struct vm_struct *get_vm_area_caller(unsigned long size,
     210             :                                         unsigned long flags, const void *caller);
     211             : extern struct vm_struct *__get_vm_area_caller(unsigned long size,
     212             :                                         unsigned long flags,
     213             :                                         unsigned long start, unsigned long end,
     214             :                                         const void *caller);
     215             : void free_vm_area(struct vm_struct *area);
     216             : extern struct vm_struct *remove_vm_area(const void *addr);
     217             : extern struct vm_struct *find_vm_area(const void *addr);
     218             : 
     219             : static inline bool is_vm_area_hugepages(const void *addr)
     220             : {
     221             :         /*
     222             :          * This may not 100% tell if the area is mapped with > PAGE_SIZE
     223             :          * page table entries, if for some reason the architecture indicates
     224             :          * larger sizes are available but decides not to use them, nothing
     225             :          * prevents that. This only indicates the size of the physical page
     226             :          * allocated in the vmalloc layer.
     227             :          */
     228             : #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
     229             :         return find_vm_area(addr)->page_order > 0;
     230             : #else
     231             :         return false;
     232             : #endif
     233             : }
     234             : 
     235             : #ifdef CONFIG_MMU
     236             : void vunmap_range(unsigned long addr, unsigned long end);
     237             : static inline void set_vm_flush_reset_perms(void *addr)
     238             : {
     239             :         struct vm_struct *vm = find_vm_area(addr);
     240             : 
     241             :         if (vm)
     242             :                 vm->flags |= VM_FLUSH_RESET_PERMS;
     243             : }
     244             : 
     245             : #else
     246             : static inline void set_vm_flush_reset_perms(void *addr)
     247             : {
     248             : }
     249             : #endif
     250             : 
     251             : /* for /proc/kcore */
     252             : extern long vread(char *buf, char *addr, unsigned long count);
     253             : 
     254             : /*
     255             :  *      Internals.  Don't use..
     256             :  */
     257             : extern struct list_head vmap_area_list;
     258             : extern __init void vm_area_add_early(struct vm_struct *vm);
     259             : extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
     260             : 
     261             : #ifdef CONFIG_SMP
     262             : # ifdef CONFIG_MMU
     263             : struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
     264             :                                      const size_t *sizes, int nr_vms,
     265             :                                      size_t align);
     266             : 
     267             : void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
     268             : # else
     269             : static inline struct vm_struct **
     270             : pcpu_get_vm_areas(const unsigned long *offsets,
     271             :                 const size_t *sizes, int nr_vms,
     272             :                 size_t align)
     273             : {
     274             :         return NULL;
     275             : }
     276             : 
     277             : static inline void
     278             : pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
     279             : {
     280             : }
     281             : # endif
     282             : #endif
     283             : 
     284             : #ifdef CONFIG_MMU
     285             : #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
     286             : #else
     287             : #define VMALLOC_TOTAL 0UL
     288             : #endif
     289             : 
     290             : int register_vmap_purge_notifier(struct notifier_block *nb);
     291             : int unregister_vmap_purge_notifier(struct notifier_block *nb);
     292             : 
     293             : #if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
     294             : bool vmalloc_dump_obj(void *object);
     295             : #else
     296             : static inline bool vmalloc_dump_obj(void *object) { return false; }
     297             : #endif
     298             : 
     299             : #endif /* _LINUX_VMALLOC_H */

Generated by: LCOV version 1.14