/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT   0x001
#define _PAGE_NEWPAGE   0x002
#define _PAGE_NEWPROT   0x004
#define _PAGE_RW        0x020
#define _PAGE_USER      0x040
#define _PAGE_ACCESSED  0x080
#define _PAGE_DIRTY     0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE  0x010   /* if the user mapped it with PROT_NONE;
                                   pte_present gives true */
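
/*
 * Illustrative sketch (not part of the original header; assumes the
 * generic __pte() constructor from asm/page.h): a PROT_NONE mapping is
 * encoded with _PAGE_PROTNONE set while _PAGE_PRESENT stays clear.
 * pte_present() below tests both bits, so the pte still reads as
 * "present" while the access checks reject it:
 *
 *      pte_t pte = __pte(_PAGE_PROTNONE | _PAGE_ACCESSED); // PAGE_NONE
 *      pte_present(pte);       // non-zero: a mapping exists here
 *      pte_read(pte);          // 0: any access must fault
 */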

#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value simply means that there will be an 8MB "hole" after
 * physical memory, before kernel virtual memory starts, so that any
 * out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET  (__va_space)
#define VMALLOC_START   ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE      ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END     (FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR   VMALLOC_START
#define MODULES_END     VMALLOC_END
#define MODULES_LEN     (MODULES_END - MODULES_VADDR)
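
/*
 * Worked example (hypothetical numbers, not part of the original header;
 * assumes __va_space is a power of two): with end_iomem = 0x00603000 and
 * VMALLOC_OFFSET = 0x00800000 (8MB),
 *
 *      VMALLOC_START = (0x00603000 + 0x00800000) & ~0x007fffff
 *                    = 0x00e03000 & 0xff800000
 *                    = 0x00800000
 *
 * i.e. the next VMALLOC_OFFSET-aligned address above end_iomem, which
 * leaves the "hole" described above between physical memory and the
 * vmalloc area.
 */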

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC                                              \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC        __pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do page protection for execute, so it treats execute
 * permission the same as read permission.  Write permission also implies
 * read permission.  The tables below are the closest approximation of
 * the requested protections that we can get.
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
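
/*
 * Illustrative note (not part of the original header): __Pxyz/__Sxyz
 * are indexed by the read/write/exec bits of a private or shared
 * mapping.  For example, mmap(..., PROT_READ | PROT_WRITE, MAP_PRIVATE,
 * ...) selects __P011 == PAGE_COPY, which omits _PAGE_RW: the first
 * write faults and the page is copied (copy-on-write).  The same
 * protections with MAP_SHARED select __S011 == PAGE_SHARED, which keeps
 * _PAGE_RW so writes go straight to the shared page.
 */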

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)     (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)  (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)   do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)  (pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)  (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define p4d_newpage(x)  (p4d_val(x) & _PAGE_NEWPAGE)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)  pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
        return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_read(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_USER) &&
               !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_exec(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_USER) &&
               !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_write(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_RW) &&
               !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_dirty(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
        return pte_present(pte) && pte_get_bits(pte, _PAGE_NEWPROT);
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPROT);
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_ACCESSED);
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        if (likely(pte_get_bits(pte, _PAGE_RW)))
                pte_clear_bits(pte, _PAGE_RW);
        else
                return pte;
        return pte_mknewprot(pte);
}
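
/*
 * Illustrative note (not part of the original header): on UML, user
 * address spaces are backed by host mappings, so a pure protection
 * change only takes effect once the host mapping is updated to match.
 * pte_wrprotect() and friends therefore tag the pte with _PAGE_NEWPROT;
 * the TLB flush code can then test pte_newprot() and, roughly:
 *
 *      if (pte_newprot(pte))
 *              pte = pte_mkuptodate(pte);      // clears _PAGE_NEWPROT
 *
 * after issuing the corresponding host protection change.
 */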

static inline pte_t pte_mkread(pte_t pte)
{
        if (unlikely(pte_get_bits(pte, _PAGE_USER)))
                return pte;
        pte_set_bits(pte, _PAGE_USER);
        return pte_mknewprot(pte);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_set_bits(pte, _PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_set_bits(pte, _PAGE_ACCESSED);
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        if (unlikely(pte_get_bits(pte, _PAGE_RW)))
                return pte;
        pte_set_bits(pte, _PAGE_RW);
        return pte_mknewprot(pte);
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_NEWPAGE);
        if (pte_present(pte))
                pte_clear_bits(pte, _PAGE_NEWPROT);
        return pte;
}

static inline pte_t pte_mknewpage(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPAGE);
        return pte;
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
        pte_copy(*pteptr, pteval);

        /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
         * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
         * mapped pages.
         */

        *pteptr = pte_mknewpage(*pteptr);
        if (pte_present(*pteptr))
                *pteptr = pte_mknewprot(*pteptr);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *pteptr, pte_t pteval)
{
        set_pte(pteptr, pteval);
}
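
/*
 * Illustrative note (not part of the original header): every store
 * through set_pte()/set_pte_at() tags the entry with _PAGE_NEWPAGE, so
 * the flush code re-syncs the host mapping even when the new value
 * equals the old one:
 *
 *      set_pte_at(mm, addr, ptep, pteval);
 *      pte_newpage(*ptep);     // always non-zero after a store
 *
 * pte_same() below deliberately masks this bookkeeping bit out.
 */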

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
#define mk_pte(page, pgprot) \
        ({ pte_t pte;                                   \
                                                        \
        pte_set_val(pte, page_to_phys(page), (pgprot)); \
        if (pte_present(pte))                           \
                pte = pte_mknewprot(pte_mknewpage(pte)); \
        pte;})
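
/*
 * Minimal usage sketch (not part of the original header): mk_pte()
 * builds a pte from a struct page plus protection bits, tagged as new
 * when present so the next flush maps it on the host:
 *
 *      pte_t pte = mk_pte(page, PAGE_SHARED);
 *      set_pte_at(mm, addr, ptep, pte);
 */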

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
        return pte;
}

/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This macro returns the kernel virtual address of the page table page
 * that the given pmd entry points to.
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma,address,ptep) do {} while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)                   (((x).val >> 5) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 11)

#define __swp_entry(type, offset) \
        ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
        ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
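
/*
 * Worked example (not part of the original header): the swap type
 * occupies bits 5-9 and the offset bits 11 and up, leaving the low bits
 * free for _PAGE_PRESENT (0x001), _PAGE_NEWPAGE (0x002) and
 * _PAGE_PROTNONE (0x010), so a swap pte can never look present:
 *
 *      __swp_entry(3, 0x42).val == (3 << 5) | (0x42 << 11) == 0x21060
 *      __swp_type(entry)   == (0x21060 >> 5) & 0x1f == 3
 *      __swp_offset(entry) == 0x21060 >> 11         == 0x42
 */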

#define kern_addr_valid(addr) (1)

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)           \
do {                                            \
        pte_clear(&init_mm, (vaddr), (ptep));   \
        __flush_tlb_one((vaddr));               \
} while (0)

#endif
