#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H

/*
 * Define CONFIG_PENTIUM_MM if you want the 4MB page table optimizations.
 * This works only on an Intel Pentium.
 */
#define CONFIG_PENTIUM_MM 1

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	22
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the i386 is two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE	1024
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	1024

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) (TASK_SIZE + (unsigned long)(x))

/*
 * The 4MB page is guessing..  Detailed in the infamous "Chapter H"
 * of the Pentium details, but assuming intel did the straightforward
 * thing, this bit set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_PCD	0x010
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040
#define _PAGE_4M	0x080	/* 4 MB page, Pentium+.. */
#define _PAGE_COW	0x200	/* implemented in software (one of the AVL bits) */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_COW)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read.  Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
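/*
 * For illustration: with the folded two-level layout above, a 32-bit
 * linear address splits as
 *
 *	bits 31..22	page directory index	(PGDIR_SHIFT = 22, 1024 entries)
 *	bits 21..12	page table index	(PAGE_SHIFT = 12, 1024 entries)
 *	bits 11..0	offset within the page
 *
 * e.g. for the (arbitrarily chosen) address 0x08048123:
 *	directory index	= 0x08048123 >> 22		= 0x20
 *	table index	= (0x08048123 >> 12) & 0x3ff	= 0x48
 *	page offset	= 0x08048123 & 0xfff		= 0x123
 */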
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef CONFIG_TEST_VERIFY_AREA

/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];

/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page[1024];

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE ((unsigned long) empty_zero_page)

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2	2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/* to set the page-dir */
#define SET_PAGE_DIR(tsk,pgdir) \
do { \
	(tsk)->tss.cr3 = (unsigned long) (pgdir); \
	if ((tsk) == current) \
		__asm__ __volatile__("movl %0,%%cr3": :"a" ((tsk)->tss.cr3)); \
} while (0)
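/*
 * For illustration, PAGE_PTR() turns a virtual address into the byte
 * offset of the corresponding pte pointer inside its page table: it
 * keeps bits 21..12 of the address and scales them by sizeof(void *).
 * e.g. for the (arbitrarily chosen) address 0x08048000:
 *	0x08048000 >> (12-2)		= 0x20120
 *	... & PTR_MASK & ~PAGE_MASK	= 0x120
 * which is pte index 0x48 times 4 bytes per pointer.
 */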
extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }
extern inline int pte_inuse(pte_t *ptep)	{ return mem_map[MAP_NR(ptep)] != 1; }
extern inline void pte_clear(pte_t *ptep)	{ pte_val(*ptep) = 0; }
extern inline void pte_reuse(pte_t * ptep)
{
	if (!(mem_map[MAP_NR(ptep)] & MAP_PAGE_RESERVED))
		mem_map[MAP_NR(ptep)]++;
}

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~PAGE_MASK) != _PAGE_TABLE || pmd_val(pmd) > high_memory; }
extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_PRESENT; }
#ifdef CONFIG_PENTIUM_MM
extern inline int pmd_inuse(pmd_t *pmdp)	{ return (pmd_val(*pmdp) & _PAGE_4M) != 0; }
#else
extern inline int pmd_inuse(pmd_t *pmdp)	{ return 0; }
#endif
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }
extern inline void pmd_reuse(pmd_t * pmdp)	{ }

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
extern inline int pgd_none(pgd_t pgd)		{ return 0; }
extern inline int pgd_bad(pgd_t pgd)		{ return 0; }
extern inline int pgd_present(pgd_t pgd)	{ return 1; }
extern inline int pgd_inuse(pgd_t * pgdp)	{ return mem_map[MAP_NR(pgdp)] != 1; }
extern inline void pgd_clear(pgd_t * pgdp)	{ }
extern inline void pgd_reuse(pgd_t * pgdp)
{
	if (!(mem_map[MAP_NR(pgdp)] & MAP_PAGE_RESERVED))
		mem_map[MAP_NR(pgdp)]++;
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
extern inline int pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_cow(pte_t pte)		{ return pte_val(pte) & _PAGE_COW; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_RW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_exprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_uncow(pte_t pte)	{ pte_val(pte) &= ~_PAGE_COW; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_RW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)	{ pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkcow(pte_t pte)	{ pte_val(pte) |= _PAGE_COW; return pte; }
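/*
 * A minimal sketch (not the actual fault-handler logic) of how the
 * modifier helpers above compose: after copying a COW page, the new
 * mapping could be made private-writable with
 *
 *	pte = pte_mkdirty(pte_mkwrite(pte_uncow(pte)));
 *
 * while fork-style sharing would go the other way, using
 * pte_wrprotect() and pte_mkcow().
 */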
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = page | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline unsigned long pte_page(pte_t pte)
{ return pte_val(pte) & PAGE_MASK; }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return pmd_val(pmd) & PAGE_MASK; }
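/*
 * For illustration: a pte is just the page-aligned address OR'ed with
 * the protection bits, so for a (hypothetical) page at 0x00123000,
 *
 *	pte_t pte = mk_pte(0x00123000, PAGE_READONLY);
 *
 * gives pte_val(pte) == 0x00123025 (_PAGE_PRESENT | _PAGE_USER |
 * _PAGE_ACCESSED == 0x025), and pte_page(pte) == 0x00123000 again.
 */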
/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct task_struct * tsk, unsigned long address)
{
	return (pgd_t *) tsk->tss.cr3 + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
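/*
 * A minimal sketch of a full (read-only) page table walk with the
 * helpers above, assuming 'tsk' and 'address' come from the caller and
 * that every level is present and not bad:
 *
 *	pgd_t *pgd = pgd_offset(tsk, address);
 *	pmd_t *pmd = pmd_offset(pgd, address);
 *	pte_t *pte = pte_offset(pmd, address);
 *	unsigned long page = pte_page(*pte) + (address & ~PAGE_MASK);
 *
 * Callers are expected to check pgd_none()/pmd_none()/pmd_bad() and
 * pte_present() at each step before dereferencing.
 */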
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any, and marks the page tables reserved.
 */
extern inline void pte_free_kernel(pte_t * pte)
{
	mem_map[MAP_NR(pte)] = 1;
	free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page;
				mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
				return page + address;
			}
			pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free_kernel(pmd_t * pmd)
{
	pmd_val(*pmd) = 0;
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

extern inline void pte_free(pte_t * pte)
{
	free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page;
				return page + address;
			}
			pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
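/*
 * A minimal sketch of how a fault path could make sure a pte exists for
 * 'address' in task 'tsk' (both assumed to come from the caller), using
 * the allocators above and pmd_alloc() below:
 *
 *	pgd_t *pgd = pgd_offset(tsk, address);
 *	pmd_t *pmd = pmd_alloc(pgd, address);
 *	pte_t *pte = pte_alloc(pmd, address);
 *
 * pte_alloc() returns a pointer into a freshly allocated (or already
 * present) page table, or NULL if it had to fall back to BAD_PAGETABLE.
 */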
/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free(pmd_t * pmd)
{
	pmd_val(*pmd) = 0;
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

extern inline void pgd_free(pgd_t * pgd)
{
	free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
	return (pgd_t *) get_free_page(GFP_KERNEL);
}

extern pgd_t swapper_pg_dir[1024];

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}
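/*
 * For illustration, the SWP_* macros below encode a swap entry (kept in
 * a non-present pte): bit 0 stays clear of _PAGE_PRESENT, bits 1..7
 * hold the swap device type and bits 8..31 the page offset within it.
 * e.g. for the (arbitrarily chosen) type 2, offset 0x1a3:
 *	SWP_ENTRY(2, 0x1a3) == (2 << 1) | (0x1a3 << 8) == 0x1a304
 *	SWP_TYPE(0x1a304)   == (0x1a304 >> 1) & 0x7f   == 2
 *	SWP_OFFSET(0x1a304) == 0x1a304 >> 8            == 0x1a3
 */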
#define SWP_TYPE(entry) (((entry) >> 1) & 0x7f)
#define SWP_OFFSET(entry) ((entry) >> 8)
#define SWP_ENTRY(type,offset) (((type) << 1) | ((offset) << 8))

#endif /* _I386_PGTABLE_H */