__bad_page(void)
{
memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
- return pte_mkdirty(mk_pte(mem_map + MAP_NR(EMPTY_PGE), PAGE_SHARED));
+ return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
}
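
Every hunk in this patch applies the same mechanical rewrite: the open-coded
"mem_map + MAP_NR(addr)" (or "&mem_map[MAP_NR(addr)]") becomes
"virt_to_page(addr)". A minimal sketch of why the two spellings agree,
assuming the classic flat mem_map[] layout (definitions illustrative, not
quoted from any particular port):

/* Illustrative only, not part of the patch:
 *   #define MAP_NR(addr)        (__pa(addr) >> PAGE_SHIFT)
 *   #define virt_to_page(addr)  (mem_map + MAP_NR(addr))
 * so for any directly-mapped kernel virtual address addr:
 */
struct page *old_way = mem_map + MAP_NR(addr);   /* open-coded pfn arithmetic */
struct page *new_way = virt_to_page(addr);       /* same pointer, new spelling */

Keeping the arithmetic behind virt_to_page() lets ports with a discontiguous
mem_map redefine the mapping in one place instead of in every caller.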
void
@@ -325,8 +325,8 @@ free_initmem (void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map+MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
}
free_initrd_mem(unsigned long start, unsigned long end)
{
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
@@ -534,8 +534,8 @@ void __init paging_init(struct meminfo *mi)
memzero(zero_page, PAGE_SIZE);
memzero(bad_page, PAGE_SIZE);
- empty_zero_page = mem_map + MAP_NR(zero_page);
- empty_bad_page = mem_map + MAP_NR(bad_page);
+ empty_zero_page = virt_to_page(zero_page);
+ empty_bad_page = virt_to_page(bad_page);
empty_bad_pte_table = ((pte_t *)bad_table) + TABLE_OFFSET;
}
@@ -598,7 +598,7 @@ void __init mem_init(void)
static inline void free_area(unsigned long addr, unsigned long end, char *s)
{
unsigned int size = (end - addr) >> 10;
- struct page *page = mem_map + MAP_NR(addr);
+ struct page *page = virt_to_page(addr);
for (; addr < end; addr += PAGE_SIZE, page ++) {
ClearPageReserved(page);
@@ -632,8 +632,8 @@ void free_initrd_mem(unsigned long start, unsigned long end)
if (!keep_initrd) {
for (addr = start; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map+MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
}
@@ -417,8 +417,8 @@ static inline void free_memmap(unsigned long start, unsigned long end)
start = __phys_to_virt(start);
end = __phys_to_virt(end);
- pg = PAGE_ALIGN((unsigned long)(mem_map + MAP_NR(start)));
- pgend = ((unsigned long)(mem_map + MAP_NR(end))) & PAGE_MASK;
+ pg = PAGE_ALIGN((unsigned long)(virt_to_page(start)));
+ pgend = ((unsigned long)(virt_to_page(end))) & PAGE_MASK;
start = __virt_to_phys(pg);
end = __virt_to_phys(pgend);
@@ -142,12 +142,10 @@ no_page:
static void __free_small_page(unsigned long spage, struct order *order)
{
unsigned long flags;
- unsigned long nr;
struct page *page;
- nr = MAP_NR(spage);
- if (nr < max_mapnr) {
- page = mem_map + nr;
+ page = virt_to_page(spage);
+ if (VALID_PAGE(page)) {
/*
* The container-page must be marked Reserved
@@ -566,8 +566,6 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM
highmem_start_page = mem_map + highstart_pfn;
- /* cache the highmem_mapnr */
- highmem_mapnr = highstart_pfn;
max_mapnr = num_physpages = highend_pfn;
#else
max_mapnr = num_physpages = max_low_pfn;
@@ -642,8 +640,8 @@ void free_initmem(void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map+MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
}
@@ -656,8 +654,8 @@ void free_initrd_mem(unsigned long start, unsigned long end)
if (start < end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
@@ -121,15 +121,14 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
*/
if (phys_addr < virt_to_phys(high_memory)) {
char *t_addr, *t_end;
- int i;
+ struct page *page;
t_addr = __va(phys_addr);
t_end = t_addr + (size - 1);
- for(i = MAP_NR(t_addr); i < MAP_NR(t_end); i++) {
- if(!PageReserved(mem_map + i))
+ for(page = virt_to_page(t_addr); page < virt_to_page(t_end); page++)
+ if(!PageReserved(page))
return NULL;
- }
}
/*
@@ -81,9 +81,9 @@ void ia64_elf32_init(struct pt_regs *regs)
{
int nr;
- put_shared_page(current, mem_map + MAP_NR(ia32_gdt_table), IA32_PAGE_OFFSET);
+ put_shared_page(current, virt_to_page(ia32_gdt_table), IA32_PAGE_OFFSET);
if (PAGE_SHIFT <= IA32_PAGE_SHIFT)
- put_shared_page(current, mem_map + MAP_NR(ia32_tss), IA32_PAGE_OFFSET + PAGE_SIZE);
+ put_shared_page(current, virt_to_page(ia32_tss), IA32_PAGE_OFFSET + PAGE_SIZE);
nr = smp_processor_id();
@@ -173,8 +173,8 @@ free_initmem (void)
addr = (unsigned long) &__init_begin;
for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) {
- clear_bit(PG_reserved, &mem_map[MAP_NR(addr)].flags);
- set_page_count(&mem_map[MAP_NR(addr)], 1);
+ clear_bit(PG_reserved, &virt_to_page(addr)->flags);
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
++totalram_pages;
}
@@ -188,8 +188,8 @@ free_initrd_mem(unsigned long start, unsigned long end)
if (start < end)
printk ("Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
- clear_bit(PG_reserved, &mem_map[MAP_NR(start)].flags);
- set_page_count(&mem_map[MAP_NR(start)], 1);
+ clear_bit(PG_reserved, &virt_to_page(start)->flags);
+ set_page_count(virt_to_page(start), 1);
free_page(start);
++totalram_pages;
}
@@ -372,7 +372,7 @@ count_reserved_pages (u64 start, u64 end, void *arg)
unsigned long *count = arg;
struct page *pg;
- for (pg = mem_map + MAP_NR(start); pg < mem_map + MAP_NR(end); ++pg)
+ for (pg = virt_to_page(start); pg < virt_to_page(end); ++pg)
if (PageReserved(pg))
++num_reserved;
*count += num_reserved;
@@ -409,7 +409,7 @@ mem_init (void)
datasize >> 10, initsize >> 10);
/* install the gate page in the global page table: */
- put_gate_page(mem_map + MAP_NR(__start_gate_section), GATE_ADDR);
+ put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR);
#ifndef CONFIG_IA64_SOFTSDV_HACKS
/*
@@ -305,7 +305,7 @@ void __init atari_stram_reserve_pages(unsigned long start_mem)
/* always reserve first page of ST-RAM, the first 2 kB are
* supervisor-only! */
- set_bit( PG_reserved, &mem_map[MAP_NR(stram_start)].flags );
+ set_bit( PG_reserved, &virt_to_page(stram_start)->flags );
#ifdef CONFIG_STRAM_SWAP
if (!max_swap_size) {
@@ -699,7 +699,7 @@ static inline void unswap_pte(struct vm_area_struct * vma, unsigned long
if (pte_page(pte) != page)
return;
if (0 /* isswap */)
- mem_map[MAP_NR(pte_page(pte))].offset = page;
+ virt_to_page(pte_page(pte))->offset = page;
else
/* We will be removing the swap cache in a moment, so... */
set_pte(dir, pte_mkdirty(pte));
@@ -716,7 +716,7 @@ static inline void unswap_pte(struct vm_area_struct * vma, unsigned long
DPRINTK( "unswap_pte: replacing entry %08lx by new page %08lx",
entry, page );
set_pte(dir, pte_mkdirty(__mk_pte(page,vma->vm_page_prot)));
- atomic_inc(&mem_map[MAP_NR(page)].count);
+ atomic_inc(&virt_to_page(page)->count);
++vma->vm_mm->rss;
}
swap_free(entry);
@@ -1291,7 +1291,7 @@ static int get_gfp_order( unsigned long size )
/* reserve a range of pages in mem_map[] */
static void reserve_region( unsigned long addr, unsigned long end )
{
- mem_map_t *mapp = &mem_map[MAP_NR(addr)];
+ mem_map_t *mapp = virt_to_page(addr);
for( ; addr < end; addr += PAGE_SIZE, ++mapp )
set_bit( PG_reserved, &mapp->flags );
@@ -153,10 +153,10 @@ void __init mem_init(void)
#if 0
#ifndef CONFIG_SUN3
if (virt_to_phys ((void *)tmp) >= mach_max_dma_address)
- clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
+ clear_bit(PG_DMA, &virt_to_page(tmp)->flags);
#endif
#endif
- if (PageReserved(mem_map+MAP_NR(tmp))) {
+ if (PageReserved(virt_to_page(tmp))) {
if (tmp >= (unsigned long)&_text
&& tmp < (unsigned long)&_etext)
codepages++;
@@ -168,7 +168,7 @@ void __init mem_init(void)
continue;
}
#if 0
- set_page_count(mem_map+MAP_NR(tmp), 1);
+ set_page_count(virt_to_page(tmp), 1);
#ifdef CONFIG_BLK_DEV_INITRD
if (!initrd_start ||
(tmp < (initrd_start & PAGE_MASK) || tmp >= initrd_end))
@@ -202,8 +202,8 @@ void __init mem_init(void)
void free_initrd_mem(unsigned long start, unsigned long end)
{
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
@@ -93,7 +93,7 @@ pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
typedef struct list_head ptable_desc;
static LIST_HEAD(ptable_list);
-#define PD_PTABLE(page) ((ptable_desc *)&mem_map[MAP_NR(page)])
+#define PD_PTABLE(page) ((ptable_desc *)virt_to_page(page))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, list))
#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
@@ -293,8 +293,8 @@ void free_initmem(void)
addr = (unsigned long)&__init_begin;
for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
- mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
- set_page_count(mem_map+MAP_NR(addr), 1);
+ virt_to_page(addr)->flags &= ~(1 << PG_reserved);
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
}
}
@@ -228,8 +228,8 @@ prom_free_prom_memory (void)
addr = PAGE_OFFSET + p->base;
while (addr < p->base + p->size) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map + MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
addr += PAGE_SIZE;
freed += PAGE_SIZE;
@@ -149,8 +149,8 @@ void prom_free_prom_memory (void)
addr = PAGE_SIZE;
while (addr < end) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map + MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
addr += PAGE_SIZE;
}
@@ -120,7 +120,8 @@ unsigned long empty_zero_page, zero_page_mask;
static inline unsigned long setup_zero_pages(void)
{
- unsigned long order, size, pg;
+ unsigned long order, size;
+ struct page *page;
switch (mips_cputype) {
case CPU_R4000SC:
@@ -137,11 +138,11 @@ static inline unsigned long setup_zero_pages(void)
if (!empty_zero_page)
panic("Oh boy, that early out of memory?");
- pg = MAP_NR(empty_zero_page);
- while (pg < MAP_NR(empty_zero_page) + (1 << order)) {
- set_bit(PG_reserved, &mem_map[pg].flags);
- set_page_count(mem_map + pg, 0);
- pg++;
+ page = virt_to_page(empty_zero_page);
+ while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
+ set_bit(PG_reserved, &page->flags);
+ set_page_count(page, 0);
+ page++;
}
size = PAGE_SIZE << order;
@@ -309,8 +310,8 @@ void __init mem_init(void)
void free_initrd_mem(unsigned long start, unsigned long end)
{
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
@@ -329,8 +330,8 @@ void free_initmem(void)
addr = (unsigned long) &__init_begin;
while (addr < (unsigned long) &__init_end) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map + MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
addr += PAGE_SIZE;
@@ -115,10 +115,10 @@ void *vmalloc_uncached (unsigned long size)
static inline void free_pte(pte_t page)
{
if (pte_present(page)) {
- unsigned long nr = pte_pagenr(page);
- if (nr >= max_mapnr || PageReserved(mem_map+nr))
+ struct page *ptpage = pte_page(page);
+ if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
return;
- __free_page(pte_page(page));
+ __free_page(ptpage);
if (current->mm->rss <= 0)
return;
current->mm->rss--;
@@ -233,8 +233,8 @@ prom_free_prom_memory (void)
addr = PAGE_OFFSET + (unsigned long) (long) p->base;
end = addr + (unsigned long) (long) p->size;
while (addr < end) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map + MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
addr += PAGE_SIZE;
freed += PAGE_SIZE;
@@ -215,7 +215,8 @@ unsigned long empty_zero_page, zero_page_mask;
unsigned long setup_zero_pages(void)
{
- unsigned long order, size, pg;
+ unsigned long order, size;
+ struct page *page;
switch (mips_cputype) {
case CPU_R4000SC:
@@ -232,11 +233,11 @@ unsigned long setup_zero_pages(void)
if (!empty_zero_page)
panic("Oh boy, that early out of memory?");
- pg = MAP_NR(empty_zero_page);
- while (pg < MAP_NR(empty_zero_page) + (1 << order)) {
- set_bit(PG_reserved, &mem_map[pg].flags);
- set_page_count(mem_map + pg, 0);
- pg++;
+ page = virt_to_page(empty_zero_page);
+ while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
+ set_bit(PG_reserved, &page->flags);
+ set_page_count(page, 0);
+ page++;
}
size = PAGE_SIZE << order;
@@ -374,8 +375,8 @@ void __init mem_init(void)
void free_initrd_mem(unsigned long start, unsigned long end)
{
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
@@ -396,8 +397,8 @@ free_initmem(void)
addr = (unsigned long)(&__init_begin);
while (addr < (unsigned long)&__init_end) {
page = PAGE_OFFSET | CPHYSADDR(addr);
- ClearPageReserved(mem_map + MAP_NR(page));
- set_page_count(mem_map + MAP_NR(page), 1);
+ ClearPageReserved(virt_to_page(page));
+ set_page_count(virt_to_page(page), 1);
free_page(page);
totalram_pages++;
addr += PAGE_SIZE;
@@ -109,10 +109,10 @@ void *vmalloc_uncached (unsigned long size)
static inline void free_pte(pte_t page)
{
if (pte_present(page)) {
- unsigned long nr = pte_pagenr(page);
- if (nr >= max_mapnr || PageReserved(mem_map+nr))
+ struct page *ptpage = pte_page(page);
+ if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
return;
- __free_page(pte_page(page));
+ __free_page(ptpage);
if (current->mm->rss <= 0)
return;
current->mm->rss--;
@@ -797,8 +797,8 @@ void __init free_initmem(void)
#define FREESEC(START,END,CNT) do { \
a = (unsigned long)(&START); \
for (; a < (unsigned long)(&END); a += PAGE_SIZE) { \
- clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags); \
- set_page_count(mem_map+MAP_NR(a), 1); \
+ clear_bit(PG_reserved, &virt_to_page(a)->flags); \
+ set_page_count(virt_to_page(a), 1); \
free_page(a); \
CNT++; \
} \
@@ -865,8 +865,8 @@ void __init free_initmem(void)
void free_initrd_mem(unsigned long start, unsigned long end)
{
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
@@ -1187,7 +1187,7 @@ void __init mem_init(void)
make sure the ramdisk pages aren't reserved. */
if (initrd_start) {
for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE)
- clear_bit(PG_reserved, &mem_map[MAP_NR(addr)].flags);
+ clear_bit(PG_reserved, &virt_to_page(addr)->flags);
}
#endif /* CONFIG_BLK_DEV_INITRD */
@@ -1196,17 +1196,17 @@ void __init mem_init(void)
if ( rtas_data )
for (addr = rtas_data; addr < PAGE_ALIGN(rtas_data+rtas_size) ;
addr += PAGE_SIZE)
- SetPageReserved(mem_map + MAP_NR(addr));
+ SetPageReserved(virt_to_page(addr));
#endif /* defined(CONFIG_ALL_PPC) */
if ( sysmap_size )
for (addr = (unsigned long)sysmap;
addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ;
addr += PAGE_SIZE)
- SetPageReserved(mem_map + MAP_NR(addr));
+ SetPageReserved(virt_to_page(addr));
for (addr = PAGE_OFFSET; addr < (unsigned long)end_of_DRAM;
addr += PAGE_SIZE) {
- if (!PageReserved(mem_map + MAP_NR(addr)))
+ if (!PageReserved(virt_to_page(addr)))
continue;
if (addr < (ulong) etext)
codepages++;
@@ -351,8 +351,8 @@ void free_initmem(void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map+MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
}
@@ -366,8 +366,8 @@ void free_initrd_mem(unsigned long start, unsigned long end)
if (start < end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
@@ -270,8 +270,8 @@ void free_initmem(void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map+MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
}
@@ -283,8 +283,8 @@ void free_initrd_mem(unsigned long start, unsigned long end)
{
unsigned long p;
for (p = start; p < end; p += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(p));
- set_page_count(mem_map+MAP_NR(p), 1);
+ ClearPageReserved(virt_to_page(p));
+ set_page_count(virt_to_page(p), 1);
free_page(p);
totalram_pages++;
}
@@ -298,20 +298,20 @@ void __init smp4d_boot_cpus(void)
}
/* Free unneeded trap tables */
- ClearPageReserved(mem_map + MAP_NR(trapbase_cpu1));
- set_page_count(mem_map + MAP_NR(trapbase_cpu1), 1);
+ ClearPageReserved(virt_to_page(trapbase_cpu1));
+ set_page_count(virt_to_page(trapbase_cpu1), 1);
free_page((unsigned long)trapbase_cpu1);
totalram_pages++;
num_physpages++;
- ClearPageReserved(mem_map + MAP_NR(trapbase_cpu2));
- set_page_count(mem_map + MAP_NR(trapbase_cpu2), 1);
+ ClearPageReserved(virt_to_page(trapbase_cpu2));
+ set_page_count(virt_to_page(trapbase_cpu2), 1);
free_page((unsigned long)trapbase_cpu2);
totalram_pages++;
num_physpages++;
- ClearPageReserved(mem_map + MAP_NR(trapbase_cpu3));
- set_page_count(mem_map + MAP_NR(trapbase_cpu3), 1);
+ ClearPageReserved(virt_to_page(trapbase_cpu3));
+ set_page_count(virt_to_page(trapbase_cpu3), 1);
free_page((unsigned long)trapbase_cpu3);
totalram_pages++;
num_physpages++;
@@ -279,22 +279,22 @@ void __init smp4m_boot_cpus(void)
/* Free unneeded trap tables */
if (!(cpu_present_map & (1 << 1))) {
- ClearPageReserved(mem_map + MAP_NR(trapbase_cpu1));
- set_page_count(mem_map + MAP_NR(trapbase_cpu1), 1);
+ ClearPageReserved(virt_to_page(trapbase_cpu1));
+ set_page_count(virt_to_page(trapbase_cpu1), 1);
free_page((unsigned long)trapbase_cpu1);
totalram_pages++;
num_physpages++;
}
if (!(cpu_present_map & (1 << 2))) {
- ClearPageReserved(mem_map + MAP_NR(trapbase_cpu2));
- set_page_count(mem_map + MAP_NR(trapbase_cpu2), 1);
+ ClearPageReserved(virt_to_page(trapbase_cpu2));
+ set_page_count(virt_to_page(trapbase_cpu2), 1);
free_page((unsigned long)trapbase_cpu2);
totalram_pages++;
num_physpages++;
}
if (!(cpu_present_map & (1 << 3))) {
- ClearPageReserved(mem_map + MAP_NR(trapbase_cpu3));
- set_page_count(mem_map + MAP_NR(trapbase_cpu3), 1);
+ ClearPageReserved(virt_to_page(trapbase_cpu3));
+ set_page_count(virt_to_page(trapbase_cpu3), 1);
free_page((unsigned long)trapbase_cpu3);
totalram_pages++;
num_physpages++;
@@ -18,14 +18,14 @@ static inline void forget_pte(pte_t page)
if (pte_none(page))
return;
if (pte_present(page)) {
- unsigned long nr = pte_pagenr(page);
- if (nr >= max_mapnr || PageReserved(mem_map+nr))
+ struct page *ptpage = pte_page(page);
+ if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
return;
/*
* free_page() used to be able to clear swap cache
* entries. We may now have to do it manually.
*/
- free_page_and_swap_cache(mem_map+nr);
+ free_page_and_swap_cache(ptpage);
return;
}
swap_free(pte_to_swp_entry(page));
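
The forget_pte()/free_pte() hunks also replace the open-coded bounds test
"nr >= max_mapnr" with VALID_PAGE() on the struct page pointer. A sketch of
the equivalence, assuming the common flat-mem_map definition of VALID_PAGE()
(illustrative, not quoted from the headers):

/* #define VALID_PAGE(page)  (((page) - mem_map) < max_mapnr) */
struct page *ptpage = pte_page(pte);
if (!VALID_PAGE(ptpage) || PageReserved(ptpage))
        return;         /* same effect as the old "nr >= max_mapnr" test */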
@@ -369,8 +369,8 @@ void __init free_mem_map_range(struct page *first, struct page *last)
prom_printf("[%p,%p] ", first, last);
#endif
while (first < last) {
- ClearPageReserved(mem_map + MAP_NR(first));
- set_page_count(mem_map + MAP_NR(first), 1);
+ ClearPageReserved(virt_to_page(first));
+ set_page_count(virt_to_page(first), 1);
free_page((unsigned long)first);
totalram_pages++;
num_physpages++;
@@ -542,7 +542,7 @@ void free_initmem (void)
struct page *p;
page = addr + phys_base;
- p = mem_map + MAP_NR(page);
+ p = virt_to_page(page);
ClearPageReserved(p);
set_page_count(p, 1);
@@ -559,7 +559,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
if (start < end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
- struct page *p = mem_map + MAP_NR(start);
+ struct page *p = virt_to_page(start);
ClearPageReserved(p);
set_page_count(p, 1);
@@ -190,7 +190,7 @@ static void iounit_map_dma_area(unsigned long va, __u32 addr, int len)
pmdp = pmd_offset(pgdp, addr);
ptep = pte_offset(pmdp, addr);
- set_pte(ptep, pte_val(mk_pte(mem_map + MAP_NR(page), dvma_prot)));
+ set_pte(ptep, pte_val(mk_pte(virt_to_page(page), dvma_prot)));
i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
@@ -247,7 +247,7 @@ static void iommu_map_dma_area(unsigned long va, __u32 addr, int len)
pmdp = pmd_offset(pgdp, addr);
ptep = pte_offset(pmdp, addr);
- set_pte(ptep, mk_pte(mem_map + MAP_NR(page), dvma_prot));
+ set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
if (ipte_cache != 0) {
iopte_val(*iopte++) = MKIOPTE(__pa(page));
} else {
@@ -612,7 +612,7 @@ static void srmmu_free_task_struct(struct task_struct *tsk)
static void srmmu_get_task_struct(struct task_struct *tsk)
{
- atomic_inc(&mem_map[MAP_NR(tsk)].count);
+ atomic_inc(&virt_to_page(tsk)->count);
}
/* tsunami.S */
@@ -2153,7 +2153,7 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);
BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pte_pagenr, srmmu_pte_pagenr, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(sparc_pte_pagenr, srmmu_pte_pagenr, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);
@@ -1320,7 +1320,7 @@ static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus
unsigned long page;
page = ((unsigned long)bufptr) & PAGE_MASK;
- if (MAP_NR(page) > max_mapnr) {
+ if (!VALID_PAGE(virt_to_page(page))) {
sun4c_flush_page(page);
return (__u32)bufptr; /* already locked */
}
@@ -2095,7 +2095,7 @@ static int sun4c_pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
static int sun4c_pmd_bad(pmd_t pmd)
{
return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
- (MAP_NR(pmd_val(pmd)) > max_mapnr));
+ (!VALID_PAGE(virt_to_page(pmd_val(pmd)))));
}
static int sun4c_pmd_present(pmd_t pmd)
@@ -2650,7 +2650,7 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0);
- BTFIXUPSET_CALL(pte_pagenr, sun4c_pte_pagenr, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(sparc_pte_pagenr, sun4c_pte_pagenr, BTFIXUPCALL_NORM);
#if PAGE_SHIFT <= 12
BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1));
#else
@@ -110,7 +110,7 @@ static unsigned int get_user_insn(unsigned long tpc)
if(!pte_present(pte))
goto out;
- pa = phys_base + (pte_pagenr(pte) << PAGE_SHIFT);
+ pa = phys_base + (sparc64_pte_pagenr(pte) << PAGE_SHIFT);
pa += (tpc & ~PAGE_MASK);
/* Use phys bypass so we don't pollute dtlb/dcache. */
@@ -18,14 +18,14 @@ static inline void forget_pte(pte_t page)
if (pte_none(page))
return;
if (pte_present(page)) {
- unsigned long nr = pte_pagenr(page);
- if (nr >= max_mapnr || PageReserved(mem_map+nr))
+ struct page *ptpage = pte_page(page);
+ if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
return;
/*
* free_page() used to be able to clear swap cache
* entries. We may now have to do it manually.
*/
- free_page_and_swap_cache(mem_map+nr);
+ free_page_and_swap_cache(ptpage);
return;
}
swap_free(pte_to_swp_entry(page));
@@ -1242,7 +1242,7 @@ void free_initmem (void)
page = (addr +
((unsigned long) __va(phys_base)) -
((unsigned long) &empty_zero_page));
- p = mem_map + MAP_NR(page);
+ p = virt_to_page(page);
ClearPageReserved(p);
set_page_count(p, 1);
@@ -1257,7 +1257,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
if (start < end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
- struct page *p = mem_map + MAP_NR(start);
+ struct page *p = virt_to_page(start);
ClearPageReserved(p);
set_page_count(p, 1);
@@ -326,7 +326,7 @@ static int lvm_snapshot_alloc_iobuf_pages(struct kiobuf * iobuf, int sectors)
if (!addr)
goto out;
iobuf->pagelist[i] = addr;
- page = mem_map + MAP_NR(addr);
+ page = virt_to_page(addr);
}
#endif
@@ -141,8 +141,8 @@ static unsigned long agp_alloc_page(void)
if (pt == NULL) {
return 0;
}
- atomic_inc(&mem_map[MAP_NR(pt)].count);
- set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
+ atomic_inc(&virt_to_page(pt)->count);
+ set_bit(PG_locked, &virt_to_page(pt)->flags);
atomic_inc(&agp_bridge.current_memory_agp);
return (unsigned long) pt;
}
@@ -154,9 +154,9 @@ static void agp_destroy_page(unsigned long page)
if (pt == NULL) {
return;
}
- atomic_dec(&mem_map[MAP_NR(pt)].count);
- clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
- wake_up(&mem_map[MAP_NR(pt)].wait);
+ atomic_dec(&virt_to_page(pt)->count);
+ clear_bit(PG_locked, &virt_to_page(pt)->flags);
+ wake_up(&virt_to_page(pt)->wait);
free_page((unsigned long) pt);
atomic_dec(&agp_bridge.current_memory_agp);
}
@@ -541,6 +541,7 @@ static int agp_generic_create_gatt_table(void)
int num_entries;
int i;
void *temp;
+ struct page *page;
/* The generic routines can't handle 2 level gatt's */
if (agp_bridge.size_type == LVL2_APER_SIZE) {
@@ -622,9 +623,8 @@ static int agp_generic_create_gatt_table(void)
}
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
- for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
- set_bit(PG_reserved, &mem_map[i].flags);
- }
+ for (page = virt_to_page(table); page < virt_to_page(table_end); page++)
+ set_bit(PG_reserved, &page->flags);
agp_bridge.gatt_table_real = (unsigned long *) table;
CACHE_FLUSH();
@@ -633,9 +633,8 @@ static int agp_generic_create_gatt_table(void)
CACHE_FLUSH();
if (agp_bridge.gatt_table == NULL) {
- for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
- clear_bit(PG_reserved, &mem_map[i].flags);
- }
+ for (page = virt_to_page(table); page < virt_to_page(table_end); page++)
+ clear_bit(PG_reserved, &page->flags);
free_pages((unsigned long) table, page_order);
@@ -653,10 +652,10 @@ static int agp_generic_create_gatt_table(void)
static int agp_generic_free_gatt_table(void)
{
- int i;
int page_order;
char *table, *table_end;
void *temp;
+ struct page *page;
temp = agp_bridge.current_size;
@@ -691,9 +690,8 @@ static int agp_generic_free_gatt_table(void)
table = (char *) agp_bridge.gatt_table_real;
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
- for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
- clear_bit(PG_reserved, &mem_map[i].flags);
- }
+ for (page = virt_to_page(table); page < virt_to_page(table_end); page++)
+ clear_bit(PG_reserved, &page->flags);
free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
return 0;
@@ -1500,13 +1498,13 @@ static int amd_create_page_map(amd_page_map *page_map)
if (page_map->real == NULL) {
return -ENOMEM;
}
- set_bit(PG_reserved, &mem_map[MAP_NR(page_map->real)].flags);
+ set_bit(PG_reserved, &virt_to_page(page_map->real)->flags);
CACHE_FLUSH();
page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL) {
clear_bit(PG_reserved,
- &mem_map[MAP_NR(page_map->real)].flags);
+ &virt_to_page(page_map->real)->flags);
free_page((unsigned long) page_map->real);
page_map->real = NULL;
return -ENOMEM;
@@ -1524,7 +1522,7 @@ static void amd_free_page_map(amd_page_map *page_map)
{
iounmap(page_map->remapped);
clear_bit(PG_reserved,
- &mem_map[MAP_NR(page_map->real)].flags);
+ &virt_to_page(page_map->real)->flags);
free_page((unsigned long) page_map->real);
}
@@ -191,7 +191,7 @@ static void * rvmalloc(signed long size)
while (size > 0)
{
page = kvirt_to_pa(adr);
- mem_map_reserve(MAP_NR(__va(page)));
+ mem_map_reserve(virt_to_page(__va(page)));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
@@ -209,7 +209,7 @@ static void rvfree(void * mem, signed long size)
while (size > 0)
{
page = kvirt_to_pa(adr);
- mem_map_unreserve(MAP_NR(__va(page)));
+ mem_map_unreserve(virt_to_page(__va(page)));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
@@ -199,7 +199,7 @@ static int v4l_fbuffer_alloc(struct zoran *zr)
zr->v4l_gbuf[i].fbuffer_phys = virt_to_phys(mem);
zr->v4l_gbuf[i].fbuffer_bus = virt_to_bus(mem);
for (off = 0; off < v4l_bufsize; off += PAGE_SIZE)
- mem_map_reserve(MAP_NR(mem + off));
+ mem_map_reserve(virt_to_page(mem + off));
DEBUG(printk(BUZ_INFO ": V4L frame %d mem 0x%x (bus: 0x%x=%d)\n", i, mem, virt_to_bus(mem), virt_to_bus(mem)));
} else {
return -ENOBUFS;
@@ -221,7 +221,7 @@ static void v4l_fbuffer_free(struct zoran *zr)
mem = zr->v4l_gbuf[i].fbuffer;
for (off = 0; off < v4l_bufsize; off += PAGE_SIZE)
- mem_map_unreserve(MAP_NR(mem + off));
+ mem_map_unreserve(virt_to_page(mem + off));
kfree((void *) zr->v4l_gbuf[i].fbuffer);
zr->v4l_gbuf[i].fbuffer = NULL;
}
@@ -286,7 +286,7 @@ static int jpg_fbuffer_alloc(struct zoran *zr)
zr->jpg_gbuf[i].frag_tab[0] = virt_to_bus((void *) mem);
zr->jpg_gbuf[i].frag_tab[1] = ((zr->jpg_bufsize / 4) << 1) | 1;
for (off = 0; off < zr->jpg_bufsize; off += PAGE_SIZE)
- mem_map_reserve(MAP_NR(mem + off));
+ mem_map_reserve(virt_to_page(mem + off));
} else {
/* jpg_bufsize is alreay page aligned */
for (j = 0; j < zr->jpg_bufsize / PAGE_SIZE; j++) {
@@ -297,7 +297,7 @@ static int jpg_fbuffer_alloc(struct zoran *zr)
}
zr->jpg_gbuf[i].frag_tab[2 * j] = virt_to_bus((void *) mem);
zr->jpg_gbuf[i].frag_tab[2 * j + 1] = (PAGE_SIZE / 4) << 1;
- mem_map_reserve(MAP_NR(mem));
+ mem_map_reserve(virt_to_page(mem));
}
zr->jpg_gbuf[i].frag_tab[2 * j - 1] |= 1;
@@ -329,7 +329,7 @@ static void jpg_fbuffer_free(struct zoran *zr)
if (zr->jpg_gbuf[i].frag_tab[0]) {
mem = (unsigned char *) bus_to_virt(zr->jpg_gbuf[i].frag_tab[0]);
for (off = 0; off < zr->jpg_bufsize; off += PAGE_SIZE)
- mem_map_unreserve(MAP_NR(mem + off));
+ mem_map_unreserve(virt_to_page(mem + off));
kfree((void *) mem);
zr->jpg_gbuf[i].frag_tab[0] = 0;
zr->jpg_gbuf[i].frag_tab[1] = 0;
@@ -338,7 +338,7 @@ static void jpg_fbuffer_free(struct zoran *zr)
for (j = 0; j < zr->jpg_bufsize / PAGE_SIZE; j++) {
if (!zr->jpg_gbuf[i].frag_tab[2 * j])
break;
- mem_map_unreserve(MAP_NR(bus_to_virt(zr->jpg_gbuf[i].frag_tab[2 * j])));
+ mem_map_unreserve(virt_to_page(bus_to_virt(zr->jpg_gbuf[i].frag_tab[2 * j])));
free_page((unsigned long) bus_to_virt(zr->jpg_gbuf[i].frag_tab[2 * j]));
zr->jpg_gbuf[i].frag_tab[2 * j] = 0;
zr->jpg_gbuf[i].frag_tab[2 * j + 1] = 0;
@@ -239,7 +239,7 @@ static void *rvmalloc(unsigned long size)
adr = (unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
- mem_map_reserve(MAP_NR(__va(page)));
+ mem_map_reserve(virt_to_page(__va(page)));
adr += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
@@ -263,7 +263,7 @@ static void rvfree(void *mem, unsigned long size)
adr = (unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
- mem_map_unreserve(MAP_NR(__va(page)));
+ mem_map_unreserve(virt_to_page(__va(page)));
adr += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
@@ -282,8 +282,8 @@ static unsigned long i810_alloc_page(drm_device_t *dev)
if(address == 0UL)
return 0;
- atomic_inc(&mem_map[MAP_NR((void *) address)].count);
- set_bit(PG_locked, &mem_map[MAP_NR((void *) address)].flags);
+ atomic_inc(&virt_to_page(address)->count);
+ set_bit(PG_locked, &virt_to_page(address)->flags);
return address;
}
@@ -293,9 +293,9 @@ static void i810_free_page(drm_device_t *dev, unsigned long page)
if(page == 0UL)
return;
- atomic_dec(&mem_map[MAP_NR((void *) page)].count);
- clear_bit(PG_locked, &mem_map[MAP_NR((void *) page)].flags);
- wake_up(&mem_map[MAP_NR((void *) page)].wait);
+ atomic_dec(&virt_to_page(page)->count);
+ clear_bit(PG_locked, &virt_to_page(page)->flags);
+ wake_up(&virt_to_page(page)->wait);
free_page(page);
return;
}
@@ -246,7 +246,7 @@ unsigned long drm_alloc_pages(int order, int area)
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- mem_map_reserve(MAP_NR(addr));
+ mem_map_reserve(virt_to_page(addr));
}
return address;
@@ -267,7 +267,7 @@ void drm_free_pages(unsigned long address, int order, int area)
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- mem_map_unreserve(MAP_NR(addr));
+ mem_map_unreserve(virt_to_page(addr));
}
free_pages(address, order);
}
@@ -57,8 +57,8 @@ static unsigned long mga_alloc_page(drm_device_t *dev)
if(address == 0UL) {
return 0;
}
- atomic_inc(&mem_map[MAP_NR((void *) address)].count);
- set_bit(PG_locked, &mem_map[MAP_NR((void *) address)].flags);
+ atomic_inc(&virt_to_page(address)->count);
+ set_bit(PG_locked, &virt_to_page(address)->flags);
return address;
}
@@ -70,9 +70,9 @@ static void mga_free_page(drm_device_t *dev, unsigned long page)
if(page == 0UL) {
return;
}
- atomic_dec(&mem_map[MAP_NR((void *) page)].count);
- clear_bit(PG_locked, &mem_map[MAP_NR((void *) page)].flags);
- wake_up(&mem_map[MAP_NR((void *) page)].wait);
+ atomic_dec(&virt_to_page(page)->count);
+ clear_bit(PG_locked, &virt_to_page(page)->flags);
+ wake_up(&virt_to_page(page)->wait);
free_page(page);
return;
}
@@ -89,13 +89,13 @@ struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
offset = address - vma->vm_start;
page = offset >> PAGE_SHIFT;
physical = (unsigned long)dev->lock.hw_lock + offset;
- atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */
+ atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
return physical;
#else
- return mem_map + MAP_NR(physical);
+ return (virt_to_page(physical));
#endif
}
@@ -124,13 +124,13 @@ struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
page = offset >> PAGE_SHIFT;
physical = dma->pagelist[page] + (offset & (~PAGE_MASK));
- atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */
+ atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
return physical;
#else
- return mem_map + MAP_NR(physical);
+ return (virt_to_page(physical));
#endif
}
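
A side note on the two DRM nopage hunks just above: as the #if branches show,
nopage handlers on older kernels returned the physical address, while newer
kernels expect a struct page pointer, which the converted code now produces
with virt_to_page(physical) instead of mem_map + MAP_NR(physical).

/* Illustrative decode of the guard; KERNEL_VERSION is from linux/version.h:
 *   #define KERNEL_VERSION(a,b,c)  (((a) << 16) + ((b) << 8) + (c))
 *   KERNEL_VERSION(2,3,23) == 0x020317
 */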
@@ -48,11 +48,10 @@ static inline void *dmaalloc(size_t size)
}
addr = __get_dma_pages(GFP_KERNEL, get_order(size));
if (addr) {
- int i;
+ struct page *page;
- for (i = MAP_NR(addr); i < MAP_NR(addr+size); i++) {
- mem_map_reserve(i);
- }
+ for (page = virt_to_page(addr); page < virt_to_page(addr+size); page++)
+ mem_map_reserve(page);
}
return (void *)addr;
}
@@ -60,12 +59,11 @@ static inline void *dmaalloc(size_t size)
static inline void dmafree(void *addr, size_t size)
{
if (size > 0) {
- int i;
+ struct page *page;
- for (i = MAP_NR((unsigned long)addr);
- i < MAP_NR((unsigned long)addr+size); i++) {
- mem_map_unreserve (i);
- }
+ for (page = virt_to_page((unsigned long)addr);
+ page < virt_to_page((unsigned long)addr+size); page++)
+ mem_map_unreserve(page);
free_pages((unsigned long) addr, get_order(size));
}
}
@@ -136,13 +136,12 @@ static int grabbuf_alloc(struct planb *pb)
|GFP_DMA, 0);
if (!pb->rawbuf[i])
break;
- set_bit(PG_reserved, &mem_map[MAP_NR(pb->rawbuf[i])].flags);
+ mem_map_reserve(virt_to_page(pb->rawbuf[i]));
}
if (i-- < npage) {
printk(KERN_DEBUG "PlanB: init_grab: grab buffer not allocated\n");
for (; i > 0; i--) {
- clear_bit(PG_reserved,
- &mem_map[MAP_NR(pb->rawbuf[i])].flags);
+ mem_map_unreserve(virt_to_page(pb->rawbuf[i]));
free_pages((unsigned long)pb->rawbuf[i], 0);
}
kfree(pb->rawbuf);
@@ -435,8 +434,7 @@ static void planb_prepare_close(struct planb *pb)
}
if(pb->rawbuf) {
for (i = 0; i < pb->rawbuf_size; i++) {
- clear_bit(PG_reserved,
- &mem_map[MAP_NR(pb->rawbuf[i])].flags);
+ mem_map_unreserve(virt_to_page(pb->rawbuf[i]));
free_pages((unsigned long)pb->rawbuf[i], 0);
}
kfree(pb->rawbuf);
@@ -50,7 +50,7 @@ void* bmalloc(unsigned long size)
if (mem) {
unsigned long adr = (unsigned long)mem;
while (size > 0) {
- mem_map_reserve(MAP_NR(phys_to_virt(adr)));
+ mem_map_reserve(virt_to_page(phys_to_virt(adr)));
adr += PAGE_SIZE;
size -= PAGE_SIZE;
}
@@ -64,7 +64,7 @@ void bfree(void* mem, unsigned long size)
unsigned long adr = (unsigned long)mem;
unsigned long siz = size;
while (siz > 0) {
- mem_map_unreserve(MAP_NR(phys_to_virt(adr)));
+ mem_map_unreserve(virt_to_page(phys_to_virt(adr)));
adr += PAGE_SIZE;
siz -= PAGE_SIZE;
}
@@ -221,7 +221,7 @@ static void * rvmalloc(unsigned long size)
while (size > 0)
{
page = kvirt_to_pa(adr);
- mem_map_reserve(MAP_NR(__va(page)));
+ mem_map_reserve(virt_to_page(__va(page)));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
@@ -239,7 +239,7 @@ static void rvfree(void * mem, unsigned long size)
while (size > 0)
{
page = kvirt_to_pa(adr);
- mem_map_unreserve(MAP_NR(__va(page)));
+ mem_map_unreserve(virt_to_page(__va(page)));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
#include <linux/malloc.h>
#include <linux/soundcard.h>
#include <linux/pci.h>
+#include <linux/wrapper.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/init.h>
@@ -591,13 +592,13 @@ static void start_adc(struct cm_state *s)
static void dealloc_dmabuf(struct dmabuf *db)
{
- unsigned long map, mapend;
+ struct page *pstart, *pend;
if (db->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (pstart = virt_to_page(db->rawbuf); pstart <= pend; pstart++)
+ mem_map_unreserve(pstart);
free_pages((unsigned long)db->rawbuf, db->buforder);
}
db->rawbuf = NULL;
@@ -614,7 +615,7 @@ static int prog_dmabuf(struct cm_state *s, unsigned rec)
int order;
unsigned bytepersec;
unsigned bufs;
- unsigned long map, mapend;
+ struct page *pstart, *pend;
unsigned char fmt;
unsigned long flags;
@@ -646,9 +647,9 @@ static int prog_dmabuf(struct cm_state *s, unsigned rec)
printk(KERN_DEBUG "cmpci: DMA buffer beyond 16MB: busaddr 0x%lx size %ld\n",
virt_to_bus(db->rawbuf), PAGE_SIZE << db->buforder);
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (pstart = virt_to_page(db->rawbuf); pstart <= pend; pstart++)
+ mem_map_reserve(pstart);
}
bytepersec = rate << sample_shift[fmt];
bufs = PAGE_SIZE << db->buforder;
@@ -56,8 +56,9 @@ static long dmabuf_timeout(struct dma_buffparms *dmap)
static int sound_alloc_dmap(struct dma_buffparms *dmap)
{
char *start_addr, *end_addr;
- int i, dma_pagesize;
+ int dma_pagesize;
int sz, size;
+ struct page *page;
dmap->mapping_flags &= ~DMA_MAP_MAPPED;
@@ -113,14 +114,15 @@ static int sound_alloc_dmap(struct dma_buffparms *dmap)
dmap->raw_buf = start_addr;
dmap->raw_buf_phys = virt_to_bus(start_addr);
- for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
- set_bit(PG_reserved, &mem_map[i].flags);;
+ for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
+ mem_map_reserve(page);
return 0;
}
static void sound_free_dmap(struct dma_buffparms *dmap)
{
- int sz, size, i;
+ int sz, size;
+ struct page *page;
unsigned long start_addr, end_addr;
if (dmap->raw_buf == NULL)
@@ -132,8 +134,8 @@ static void sound_free_dmap(struct dma_buffparms *dmap)
start_addr = (unsigned long) dmap->raw_buf;
end_addr = start_addr + dmap->buffsize;
- for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
- clear_bit(PG_reserved, &mem_map[i].flags);;
+ for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
+ mem_map_unreserve(page);
free_pages((unsigned long) dmap->raw_buf, sz);
dmap->raw_buf = NULL;
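
All of the sound and video driver hunks that follow convert the same
reserve/unreserve loops. With MAP_NR() gone, the mem_map_reserve() and
mem_map_unreserve() wrappers from <linux/wrapper.h> take a struct page
pointer instead of a mem_map index, which is why several files gain that
include. Roughly (illustrative, not quoted from the header):

/* #define mem_map_reserve(p)    set_bit(PG_reserved, &(p)->flags)   */
/* #define mem_map_unreserve(p)  clear_bit(PG_reserved, &(p)->flags) */

/* Typical converted loop: reserve every page backing the DMA buffer so
 * that remap_page_range() will map it; rawbuf and order as in the hunks. */
struct page *page, *pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
for (page = virt_to_page(rawbuf); page <= pend; page++)
        mem_map_reserve(page);

Note the two loop-bound styles in the hunks: "page <= virt_to_page(last_byte)"
is inclusive, while "page < virt_to_page(one_past_end)" is exclusive; both
walk the same set of pages when paired with the matching end address.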
#include "audio.h"
#include <linux/sched.h>
#include <linux/smp_lock.h>
+#include <linux/wrapper.h>
static void calculate_ofrag(struct woinst *);
static void calculate_ifrag(struct wiinst *);
@@ -918,7 +919,7 @@ static int emu10k1_audio_mmap(struct file *file, struct vm_area_struct *vma)
/* Now mark the pages as reserved, otherwise remap_page_range doesn't do what we want */
for (i = 0; i < wave_out->wavexferbuf->numpages; i++)
- set_bit(PG_reserved, &mem_map[MAP_NR(wave_out->pagetable[i])].flags);
+ mem_map_reserve(virt_to_page(wave_out->pagetable[i]));
}
size = vma->vm_end - vma->vm_start;
@@ -1137,7 +1138,7 @@ static int emu10k1_audio_release(struct inode *inode, struct file *file)
/* Undo marking the pages as reserved */
for (i = 0; i < woinst->wave_out->wavexferbuf->numpages; i++)
- set_bit(PG_reserved, &mem_map[MAP_NR(woinst->wave_out->pagetable[i])].flags);
+ mem_map_reserve(virt_to_page(woinst->wave_out->pagetable[i]));
}
woinst->mapped = 0;
#include <linux/soundcard.h>
#include <linux/pci.h>
#include <linux/smp_lock.h>
+#include <linux/wrapper.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/init.h>
@@ -540,13 +541,13 @@ static void start_adc(struct es1370_state *s)
extern inline void dealloc_dmabuf(struct es1370_state *s, struct dmabuf *db)
{
- unsigned long map, mapend;
+ struct page *page, *pend;
if (db->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_unreserve(page);
pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
}
db->rawbuf = NULL;
@@ -558,7 +559,7 @@ static int prog_dmabuf(struct es1370_state *s, struct dmabuf *db, unsigned rate,
int order;
unsigned bytepersec;
unsigned bufs;
- unsigned long map, mapend;
+ struct page *page, *pend;
db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
if (!db->rawbuf) {
@@ -570,9 +571,9 @@ static int prog_dmabuf(struct es1370_state *s, struct dmabuf *db, unsigned rate,
return -ENOMEM;
db->buforder = order;
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
}
fmt &= ES1370_FMT_MASK;
bytepersec = rate << sample_shift[fmt];
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/ac97_codec.h>
+#include <linux/wrapper.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
@@ -872,13 +873,13 @@ static void start_adc(struct es1371_state *s)
extern inline void dealloc_dmabuf(struct es1371_state *s, struct dmabuf *db)
{
- unsigned long map, mapend;
+ struct page *page, *pend;
if (db->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_unreserve(page);
pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
}
db->rawbuf = NULL;
@@ -890,7 +891,7 @@ static int prog_dmabuf(struct es1371_state *s, struct dmabuf *db, unsigned rate,
int order;
unsigned bytepersec;
unsigned bufs;
- unsigned long map, mapend;
+ struct page *page, *pend;
db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
if (!db->rawbuf) {
@@ -902,9 +903,9 @@ static int prog_dmabuf(struct es1371_state *s, struct dmabuf *db, unsigned rate,
return -ENOMEM;
db->buforder = order;
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
}
fmt &= ES1371_FMT_MASK;
bytepersec = rate << sample_shift[fmt];
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
+#include <linux/wrapper.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
@@ -403,13 +404,13 @@ static void start_adc(struct solo1_state *s)
extern inline void dealloc_dmabuf(struct solo1_state *s, struct dmabuf *db)
{
- unsigned long map, mapend;
+ struct page *page, *pend;
if (db->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_unreserve(page);
pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
}
db->rawbuf = NULL;
@@ -421,7 +422,7 @@ static int prog_dmabuf(struct solo1_state *s, struct dmabuf *db)
int order;
unsigned bytespersec;
unsigned bufs, sample_shift = 0;
- unsigned long map, mapend;
+ struct page *page, *pend;
db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
if (!db->rawbuf) {
@@ -433,9 +434,9 @@ static int prog_dmabuf(struct solo1_state *s, struct dmabuf *db)
return -ENOMEM;
db->buforder = order;
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
}
if (s->fmt & (AFMT_S16_LE | AFMT_U16_LE))
sample_shift++;
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/ac97_codec.h>
+#include <linux/wrapper.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
@@ -628,7 +629,7 @@ static int alloc_dmabuf(struct i810_state *state)
struct dmabuf *dmabuf = &state->dmabuf;
void *rawbuf;
int order;
- unsigned long map, mapend;
+ struct page *page, *pend;
/* alloc as big a chunk as we can, FIXME: is this necessary ?? */
for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
@@ -649,9 +650,9 @@ static int alloc_dmabuf(struct i810_state *state)
dmabuf->buforder = order;
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(rawbuf + (PAGE_SIZE << order) - 1);
- for (map = MAP_NR(rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
+ for (page = virt_to_page(rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
return 0;
}
@@ -660,13 +661,13 @@ static int alloc_dmabuf(struct i810_state *state)
static void dealloc_dmabuf(struct i810_state *state)
{
struct dmabuf *dmabuf = &state->dmabuf;
- unsigned long map, mapend;
+ struct page *page, *pend;
if (dmabuf->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
- for (map = MAP_NR(dmabuf->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
+ for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
+ mem_map_unreserve(page);
pci_free_consistent(state->card->pci_dev, PAGE_SIZE << dmabuf->buforder,
dmabuf->rawbuf, dmabuf->dma_handle);
}
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
+#include <linux/wrapper.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
@@ -2819,7 +2820,7 @@ allocate_buffers(struct ess_state *s)
{
void *rawbuf=NULL;
int order,i;
- unsigned long mapend,map;
+ struct page *page, *pend;
/* alloc as big a chunk as we can */
for (order = (dsps_order + (16-PAGE_SHIFT) + 1); order >= (dsps_order + 2 + 1); order--)
@@ -2865,17 +2866,16 @@ allocate_buffers(struct ess_state *s)
}
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(rawbuf + (PAGE_SIZE << order) - 1);
- for (map = MAP_NR(rawbuf); map <= mapend; map++) {
- set_bit(PG_reserved, &mem_map[map].flags);
- }
+ pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
+ for (page = virt_to_page(rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
return 0;
}
static void
free_buffers(struct ess_state *s)
{
- unsigned long map, mapend;
+ struct page *page, *pend;
s->dma_dac.rawbuf = s->dma_adc.rawbuf = NULL;
s->dma_dac.mapped = s->dma_adc.mapped = 0;
@@ -2884,9 +2884,9 @@ free_buffers(struct ess_state *s)
M_printk("maestro: freeing %p\n",s->card->dmapages);
/* undo marking the pages as reserved */
- mapend = MAP_NR(s->card->dmapages + (PAGE_SIZE << s->card->dmaorder) - 1);
- for (map = MAP_NR(s->card->dmapages); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(s->card->dmapages + (PAGE_SIZE << s->card->dmaorder) - 1);
+ for (page = virt_to_page(s->card->dmapages); page <= pend; page++)
+ mem_map_unreserve(page);
free_pages((unsigned long)s->card->dmapages,s->card->dmaorder);
s->card->dmapages = NULL;
*
********************************************************************/
+#include <linux/kernel.h>
#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
+#include <linux/wrapper.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
@@ -692,13 +693,13 @@ static void start_adc(struct sv_state *s)
static void dealloc_dmabuf(struct sv_state *s, struct dmabuf *db)
{
- unsigned long map, mapend;
+ struct page *page, *pend;
if (db->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_unreserve(page);
pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
}
db->rawbuf = NULL;
@@ -715,7 +716,7 @@ static int prog_dmabuf(struct sv_state *s, unsigned rec)
int order;
unsigned bytepersec;
unsigned bufs;
- unsigned long map, mapend;
+ struct page *page, *pend;
unsigned char fmt;
unsigned long flags;
@@ -747,9 +748,9 @@ static int prog_dmabuf(struct sv_state *s, unsigned rec)
printk(KERN_DEBUG "sv: DMA buffer beyond 16MB: busaddr 0x%lx size %ld\n",
virt_to_bus(db->rawbuf), PAGE_SIZE << db->buforder);
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
}
bytepersec = rate << sample_shift[fmt];
bufs = PAGE_SIZE << db->buforder;
@@ -810,8 +810,9 @@ static void sscape_write_host_ctrl2(sscape_info *devc, int a, int b)
static int sscape_alloc_dma(sscape_info *devc)
{
char *start_addr, *end_addr;
- int i, dma_pagesize;
+ int dma_pagesize;
int sz, size;
+ struct page *page;
if (devc->raw_buf != NULL) return 0; /* Already done */
dma_pagesize = (devc->dma < 4) ? (64 * 1024) : (128 * 1024);
@@ -848,23 +849,24 @@ static int sscape_alloc_dma(sscape_info *devc)
devc->raw_buf = start_addr;
devc->raw_buf_phys = virt_to_bus(start_addr);
- for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
- set_bit(PG_reserved, &mem_map[i].flags);;
+ for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
+ mem_map_reserve(page);
return 1;
}
static void sscape_free_dma(sscape_info *devc)
{
- int sz, size, i;
+ int sz, size;
unsigned long start_addr, end_addr;
+ struct page *page;
if (devc->raw_buf == NULL) return;
for (sz = 0, size = PAGE_SIZE; size < devc->buffsize; sz++, size <<= 1);
start_addr = (unsigned long) devc->raw_buf;
end_addr = start_addr + devc->buffsize;
- for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
- clear_bit(PG_reserved, &mem_map[i].flags);;
+ for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
+ mem_map_unreserve(page);
free_pages((unsigned long) devc->raw_buf, sz);
devc->raw_buf = NULL;
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/ac97_codec.h>
+#include <linux/wrapper.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
#include <linux/bitops.h>
@@ -925,7 +926,7 @@ static int alloc_dmabuf(struct trident_state *state)
struct dmabuf *dmabuf = &state->dmabuf;
void *rawbuf;
int order;
- unsigned long map, mapend;
+ struct page *page, *pend;
/* alloc as big a chunk as we can, FIXME: is this necessary ?? */
for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
@@ -946,9 +947,9 @@ static int alloc_dmabuf(struct trident_state *state)
dmabuf->buforder = order;
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(rawbuf + (PAGE_SIZE << order) - 1);
- for (map = MAP_NR(rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
+ for (page = virt_to_page(rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
return 0;
}
@@ -957,13 +958,13 @@ static int alloc_dmabuf(struct trident_state *state)
static void dealloc_dmabuf(struct trident_state *state)
{
struct dmabuf *dmabuf = &state->dmabuf;
- unsigned long map, mapend;
+ struct page *page, *pend;
if (dmabuf->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
- for (map = MAP_NR(dmabuf->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
+ for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
+ mem_map_unreserve(page);
pci_free_consistent(state->card->pci_dev, PAGE_SIZE << dmabuf->buforder,
dmabuf->rawbuf, dmabuf->dma_handle);
}
@@ -422,7 +422,7 @@ static void dmabuf_release(struct dmabuf *db)
for(nr = 0; nr < NRSGBUF; nr++) {
if (!(p = db->sgbuf[nr]))
continue;
- mem_map_unreserve(MAP_NR(p));
+ mem_map_unreserve(virt_to_page(p));
free_page((unsigned long)p);
db->sgbuf[nr] = NULL;
}
@@ -464,7 +464,7 @@ static int dmabuf_init(struct dmabuf *db)
if (!p)
return -ENOMEM;
db->sgbuf[nr] = p;
- mem_map_reserve(MAP_NR(p));
+ mem_map_reserve(virt_to_page(p));
}
memset(db->sgbuf[nr], AFMT_ISUNSIGNED(db->format) ? 0x80 : 0, PAGE_SIZE);
if ((nr << PAGE_SHIFT) >= db->dmasize)
@@ -268,7 +268,7 @@ static void *rvmalloc(unsigned long size)
adr = (unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
- mem_map_reserve(MAP_NR(__va(page)));
+ mem_map_reserve(virt_to_page(__va(page)));
adr += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
@@ -292,7 +292,7 @@ static void rvfree(void *mem, unsigned long size)
adr=(unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
- mem_map_unreserve(MAP_NR(__va(page)));
+ mem_map_unreserve(virt_to_page(__va(page)));
adr += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
@@ -248,7 +248,7 @@ static void *rvmalloc(unsigned long size)
adr = (unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
- mem_map_reserve(MAP_NR(__va(page)));
+ mem_map_reserve(virt_to_page(__va(page)));
adr += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
@@ -272,7 +272,7 @@ static void rvfree(void *mem, unsigned long size)
adr=(unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
- mem_map_unreserve(MAP_NR(__va(page)));
+ mem_map_unreserve(virt_to_page(__va(page)));
adr += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
#include <linux/malloc.h>
#include <linux/init.h>
#include <linux/fb.h>
+#include <linux/wrapper.h>
#include <asm/hardware.h>
#include <asm/io.h>
@@ -1532,8 +1533,8 @@ free_unused_pages(unsigned int virtual_start, unsigned int virtual_end)
* set count to 1, and free
* the page.
*/
- clear_bit(PG_reserved, &mem_map[MAP_NR(virtual_start)].flags);
- atomic_set(&mem_map[MAP_NR(virtual_start)].count, 1);
+ mem_map_unreserve(virt_to_page(virtual_start));
+ set_page_count(virt_to_page(virtual_start), 1);
free_page(virtual_start);
virtual_start += PAGE_SIZE;
@@ -1628,7+1629,7 @@ acornfb_init(void)
for (page = current_par.screen_base;
page < PAGE_ALIGN(current_par.screen_base + size);
page += PAGE_SIZE)
- mem_map[MAP_NR(page)].flags |= (1 << PG_reserved);
+ mem_map_reserve(virt_to_page(page));
/* Hand back any excess pages that we allocated. */
for (page = current_par.screen_base + size; page < top; page += PAGE_SIZE)
free_page(page);
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/delay.h>
+#include <linux/wrapper.h>
#include <asm/hardware.h>
#include <asm/io.h>
@@ -730,8+731,8 @@ __init sa1100fb_map_video_memory(void)
u_int required_pages;
u_int extra_pages;
u_int order;
- u_int i;
char *allocated_region;
+ struct page *page;
if (VideoMemRegion != NULL)
return -EINVAL;
@@ -757,9+758,9 @@ __init sa1100fb_map_video_memory(void)
/* Set reserved flag for fb memory to allow it to be remapped into */
/* user space by the common fbmem driver using remap_page_range(). */
- for(i = MAP_NR(VideoMemRegion);
- i < MAP_NR(VideoMemRegion + ALLOCATED_FB_MEM_SIZE); i++)
- set_bit(PG_reserved, &mem_map[i].flags);
+ for(page = virt_to_page(VideoMemRegion);
+ page < virt_to_page(VideoMemRegion + ALLOCATED_FB_MEM_SIZE); page++)
+ mem_map_reserve(page);
/* Remap the fb memory to a non-buffered, non-cached region */
VideoMemRegion = (u_char *)__ioremap((u_long)VideoMemRegion_phys,
@@ -421,6+421,7 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned
end = PMD_SIZE;
do {
pte_t page = *pte;
+ struct page *ptpage;
address += PAGE_SIZE;
pte++;
@@ -432,8+433,9 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned
++*pages;
if (pte_dirty(page))
++*dirty;
- if ((pte_pagenr(page) >= max_mapnr) ||
- PageReserved(pte_pagenr(page) + mem_map))
+ ptpage = pte_page(page);
+ if ((!VALID_PAGE(ptpage)) ||
+ PageReserved(ptpage))
continue;
if (page_count(pte_page(page)) > 1)
++*shared;
@@ -245,7+245,7 @@ extern __inline__ pgd_t *get_pgd_slow(void)
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
pgd_val(ret[PTRS_PER_PGD])
- = pte_val(mk_pte(mem_map + MAP_NR(ret), PAGE_KERNEL));
+ = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
}
return ret;
}
@@ -141,7+141,7 @@ extern unsigned long __zero_page(void);
#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(ZERO_PGE))
+#define ZERO_PAGE(vaddr) (virt_to_page(ZERO_PGE))
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))
@@ -209,8+209,7 @@ extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
-#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> 32)))
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> 32)))
extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
@@ -145,7+145,7 @@ unsigned long get_wchan(struct task_struct *p);
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
@@ -80,7+80,7 @@ extern void __handle_bad_pmd_kernel(pmd_t *pmd);
#define pte_clear(ptep) set_pte((ptep), __pte(0))
#ifndef CONFIG_DISCONTIGMEM
-#define pte_pagenr(pte) ((unsigned long)(((pte_val(pte) - PHYS_OFFSET) >> PAGE_SHIFT)))
+#define pte_page(x) (mem_map + (unsigned long)(((pte_val(x) - PHYS_OFFSET) >> PAGE_SHIFT)))
#else
/*
* I'm not happy with this - we needlessly convert a physical address
@@ -88,7+88,7 @@ extern void __handle_bad_pmd_kernel(pmd_t *pmd);
* which, if __va and __pa are expensive causes twice the expense for
* zero gain. --rmk
*/
-#define pte_pagenr(pte) MAP_NR(__va(pte_val(pte)))
+#define pte_page(x) (mem_map + MAP_NR(__va(pte_val(x))))
#endif
#define pmd_none(pmd) (!pmd_val(pmd))
@@ -99,7+99,6 @@ extern void __handle_bad_pmd_kernel(pmd_t *pmd);
*/
#define page_address(page) ((page)->virtual)
#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))
-#define pte_page(x) (mem_map + pte_pagenr(x))
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -89,7+89,7 @@ __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
@@ -422,7+422,7 @@ unsigned long get_wchan(struct task_struct *p);
#define THREAD_SIZE (2*PAGE_SIZE)
#define alloc_task_struct() ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long) (p), 1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
#define page_address(page) ((void *) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)))
/*
- * Given a PTE, return the index of the mem_map[] entry corresponding
- * to the page frame the PTE.
- */
-#define pte_pagenr(x) ((unsigned long) ((pte_val(x) & _PFN_MASK) >> PAGE_SHIFT))
-
-/*
* Now for some cache flushing routines. This is the kind of stuff
* that can be very expensive, so try to avoid them whenever possible.
*/
@@ -250,7+244,7 @@ extern pmd_t *ia64_bad_pagetable (void);
#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(pte) (pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
-#define pte_page(pte) (mem_map + pte_pagenr(pte))
+#define pte_page(pte) (mem_map + (unsigned long) ((pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT))
#define pmd_set(pmdp, ptep) (pmd_val(*(pmdp)) = __pa(ptep))
#define pmd_none(pmd) (!pmd_val(pmd))
@@ -418,7+412,7 @@ do { \
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
# endif /* !__ASSEMBLY__ */
@@ -685,7+685,7 @@ thread_saved_pc (struct thread_struct *t)
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL, IA64_TASK_STRUCT_LOG_NUM_PAGES))
#define free_task_struct(p) free_pages((unsigned long)(p), IA64_TASK_STRUCT_LOG_NUM_PAGES)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
@@ -172,7+172,7 @@ extern pte_t * __bad_pagetable(void);
#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))
@@ -228,7+228,6 @@ extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_FAKE_SUPER))
#define pte_clear(ptep) ({ pte_val(*(ptep)) = 0; })
-#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) ((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
@@ -248,7+247,7 @@ extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
/* Permanent address of a page. */
#define page_address(page) ((page)->virtual)
#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#define pte_page(pte) (mem_map+pte_pagenr(pte))
+#define pte_page(pte) (mem_map+((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT))
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
@@ -141,7+141,7 @@ unsigned long get_wchan(struct task_struct *p);
({ \
unsigned long eip = 0; \
if ((tsk)->thread.esp0 > PAGE_SIZE && \
- MAP_NR((tsk)->thread.esp0) < max_mapnr) \
+ (VALID_PAGE(virt_to_page((tsk)->thread.esp0)))) \
eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
eip; })
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
@@ -152,7+152,7 @@ unsigned long get_wchan(struct task_struct *p);
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
@@ -216,7+216,7 @@ extern unsigned long zero_page_mask;
#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) \
- (mem_map + MAP_NR(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
+ (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))
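[Note: MIPS keeps one zero page per cache color, and zero_page_mask selects
the copy whose color matches the user address; that is why virt_to_page()
is handed the offset address here. Illustration only, assuming
empty_zero_page holds the base kernel address of the colored zero pages:]

    static struct page *colored_zero_page(unsigned long vaddr)
    {
        /* pick the zero page with the same cache color as vaddr */
        return virt_to_page(empty_zero_page + (vaddr & zero_page_mask));
    }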
@@ -241,11+241,6 @@ extern pmd_t invalid_pte_table[PAGE_SIZE/sizeof(pmd_t)];
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-extern inline unsigned long pte_page(pte_t pte)
-{
- return PAGE_OFFSET + (pte_val(pte) & PAGE_MASK);
-}
-
extern inline unsigned long pmd_page(pmd_t pmd)
{
return pmd_val(pmd);
@@ -312,8+307,7 @@ extern inline void pgd_clear(pgd_t *pgdp) { }
* is simple.
*/
#define page_address(page) ((page)->virtual)
-#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PAGE_SHIFT)))
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
/*
* The following only work if pte_present() is true.
@@ -234,7+234,7 @@ unsigned long get_wchan(struct task_struct *p);
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
@@ -241,7+241,7 @@ extern unsigned long zero_page_mask;
#define BAD_PMDTABLE __bad_pmd_table()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) \
- (mem_map + MAP_NR(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
+ (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))
@@ -267,11+267,6 @@ extern pmd_t empty_bad_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-extern inline unsigned long pte_page(pte_t pte)
-{
- return PAGE_OFFSET + (pte_val(pte) & PAGE_MASK);
-}
-
extern inline unsigned long pmd_page(pmd_t pmd)
{
return pmd_val(pmd);
@@ -359,13+354,13 @@ extern inline void pgd_clear(pgd_t *pgdp)
*/
#define page_address(page) ((page)->virtual)
#ifndef CONFIG_DISCONTIGMEM
-#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PAGE_SHIFT)))
+#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#else
-#define pte_pagenr(x) \
+#define mips64_pte_pagenr(x) \
(PLAT_NODE_DATA_STARTNR(PHYSADDR_TO_NID(pte_val(x))) + \
PLAT_NODE_DATA_LOCALNR(pte_val(x), PHYSADDR_TO_NID(pte_val(x))))
+#define pte_page(x) (mem_map+mips64_pte_pagenr(x))
#endif
-#define pte_page(x) (mem_map+pte_pagenr(x))
/*
* The following only work if pte_present() is true.
@@ -290,7+290,7 @@ unsigned long get_wchan(struct task_struct *p);
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL, 2))
#define free_task_struct(p) free_pages((unsigned long)(p), 2)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
@@ -274,7+274,7 @@ extern unsigned long ioremap_bot, ioremap_base;
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/*
* BAD_PAGETABLE is used when we need a bogus page-table, while
@@ -303,7+303,6 @@ extern pte_t * __bad_pagetable(void);
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(ptep) do { pte_val(*(ptep)) = 0; } while (0)
-#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) != 0)
@@ -315,7+314,7 @@ extern pte_t * __bad_pagetable(void);
*/
#define page_address(page) ((page)->virtual)
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#ifndef __ASSEMBLY__
/*
@@ -687,7+687,7 @@ unsigned long get_wchan(struct task_struct *p);
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
/* in process.c - for early bootup debug -- Cort */
int ll_printk(const char *, ...);
@@ -41,7+41,7 @@ extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
/* Certain architectures need to do special things when PTEs
@@ -272,7+272,6 @@ extern inline int pte_none(pte_t pte) { return ((pte_val(pte) & (_PAGE
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = _PAGE_INVALID; }
#define PTE_INIT(x) pte_clear(x)
-extern inline int pte_pagenr(pte_t pte) { return ((unsigned long)((pte_val(pte) >> PAGE_SHIFT))); }
extern inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) & _PAGE_TABLE_INV; }
extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) == 0); }
@@ -337,7+336,7 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte_val(pte) = (pte_val(pte) & PAGE_MASK) | pgprot_val(newprot); return pte; }
#define page_address(page) ((page)->virtual)
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#define pmd_page(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
@@ -149,7+149,7 @@ unsigned long get_wchan(struct task_struct *p);
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
@@ -62,7+62,7 @@ extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
@@ -156,7+156,6 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd);
#define pte_none(x) (!pte_val(x))
#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0)
-#define pte_pagenr(x) ((unsigned long)(((pte_val(x) -__MEMORY_START) >> PAGE_SHIFT)))
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
@@ -169,7+168,7 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd);
*/
#define page_address(page) ((page)->virtual)
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+(unsigned long)(((pte_val(x) - __MEMORY_START) >> PAGE_SHIFT)))
/*
* The following only work if pte_present() is true.
@@ -217,7+217,7 @@ extern unsigned long get_wchan(struct task_struct *p);
#define THREAD_SIZE (2*PAGE_SIZE)
extern struct task_struct * alloc_task_struct(void);
extern void free_task_struct(struct task_struct *);
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
@@ -204,11+204,11 @@ extern unsigned long empty_zero_page;
#define SIZEOF_PTR_LOG2 2
-BTFIXUPDEF_CALL_CONST(unsigned long, pte_pagenr, pte_t)
+BTFIXUPDEF_CALL_CONST(unsigned long, sparc_pte_pagenr, pte_t)
BTFIXUPDEF_CALL_CONST(unsigned long, pmd_page, pmd_t)
BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page, pgd_t)
-#define pte_pagenr(pte) BTFIXUP_CALL(pte_pagenr)(pte)
+#define sparc_pte_pagenr(pte) BTFIXUP_CALL(sparc_pte_pagenr)(pte)
#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
#define pgd_page(pgd) BTFIXUP_CALL(pgd_page)(pgd)
@@ -308,7+308,7 @@ BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
/* Permanent address of a page. */
#define page_address(page) ((page)->virtual)
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+sparc_pte_pagenr(x))
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -125,7+125,7 @@ extern struct pgtable_cache_struct {
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
- struct page *page = mem_map + MAP_NR(pgd);
+ struct page *page = virt_to_page(pgd);
if (!page->pprev_hash) {
(unsigned long *)page->next_hash = pgd_quicklist;
@@ -177,7+177,7 @@ extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
#define pgd_set(pgdp, pmdp) \
(pgd_val(*(pgdp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
-#define pte_pagenr(pte) (((unsigned long) ((pte_val(pte)&~PAGE_OFFSET)-phys_base)>>PAGE_SHIFT))
+#define sparc64_pte_pagenr(pte) (((unsigned long) ((pte_val(pte)&~PAGE_OFFSET)-phys_base)>>PAGE_SHIFT))
#define pmd_page(pmd) ((unsigned long) __va((pmd_val(pmd)<<11UL)))
#define pgd_page(pgd) ((unsigned long) __va((pgd_val(pgd)<<11UL)))
#define pte_none(pte) (!pte_val(pte))
@@ -209,7+209,7 @@ extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
#define __page_address(page) ((page)->virtual)
#define page_address(page) ({ __page_address(page); })
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+sparc64_pte_pagenr(x))
/* Be very careful when you change these three, they are delicate. */
#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R))
@@ -259,7+259,7 @@ __out: __ret; \
/* Allocation and freeing of task_struct and kernel stack. */
#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL, 1))
#define free_task_struct(tsk) free_pages((unsigned long)(tsk),1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
@@ -11,7+11,6 @@ extern struct page *highmem_start_page;
#include <asm/highmem.h>
/* declarations for linux/mm/highmem.c */
-extern unsigned long highmem_mapnr;
FASTCALL(unsigned int nr_free_highpages(void));
extern struct page * prepare_highmem_swapout(struct page *);
@@ -338,6+338,9 @@ extern unsigned long FASTCALL(get_zeroed_page(int gfp_mask));
#define __get_dma_pages(gfp_mask, order) \
__get_free_pages((gfp_mask) | GFP_DMA,(order))
+#define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr))
+#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr)
+
/*
* The old interface name will be removed in 2.5:
*/
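[Note: this hunk is the heart of the patch. For now virt_to_page() is just
the old mem_map index arithmetic spelled as a pointer, and VALID_PAGE()
replaces the open-coded "nr < max_mapnr" tests. The idiom the rest of the
diff applies everywhere, as a sketch:]

    static int page_is_usable(unsigned long addr)
    {
        struct page *page = virt_to_page(addr); /* == mem_map + MAP_NR(addr) */

        /* VALID_PAGE(page) == (MAP_NR(addr) < max_mapnr) */
        return VALID_PAGE(page) && !PageReserved(page);
    }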
/*
* From a kernel address, get the "struct page *"
*/
-#define page_cache_entry(x) (mem_map + MAP_NR(x))
+#define page_cache_entry(x) virt_to_page(x)
extern unsigned int page_hash_bits;
#define PAGE_HASH_BITS (page_hash_bits)
#define vma_get_end(v) v->vm_end
#define vma_get_page_prot(v) v->vm_page_prot
-#define mem_map_reserve(p) set_bit(PG_reserved, &mem_map[p].flags)
-#define mem_map_unreserve(p) clear_bit(PG_reserved, &mem_map[p].flags)
-#define mem_map_inc_count(p) atomic_inc(&(mem_map[p].count))
-#define mem_map_dec_count(p) atomic_dec(&(mem_map[p].count))
+#define mem_map_reserve(p) set_bit(PG_reserved, &(p)->flags)
+#define mem_map_unreserve(p) clear_bit(PG_reserved, &(p)->flags)
+#define mem_map_inc_count(p) atomic_inc(&(p)->count)
+#define mem_map_dec_count(p) atomic_dec(&(p)->count)
#endif
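[Note: with the map-number flavour gone, callers hand these macros the
struct page itself; combined with virt_to_page() the call sites read
naturally. Usage sketch:]

    static void reserve_example(void)
    {
        char *buf = (char *) __get_free_page(GFP_KERNEL);

        if (!buf)
            return;
        mem_map_reserve(virt_to_page(buf)); /* was: mem_map_reserve(MAP_NR(buf)) */
        /* ... map it to user space with remap_page_range() ... */
        mem_map_unreserve(virt_to_page(buf));
        free_page((unsigned long) buf);
    }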
@@ -24,7+24,6 @@ static int access_one_page(struct mm_struct * mm, struct vm_area_struct * vma, u
pgd_t * pgdir;
pmd_t * pgmiddle;
pte_t * pgtable;
- unsigned long mapnr;
unsigned long maddr;
struct page *page;
@@ -42,11+41,10 @@ repeat:
pgtable = pte_offset(pgmiddle, addr);
if (!pte_present(*pgtable))
goto fault_in_page;
- mapnr = pte_pagenr(*pgtable);
if (write && (!pte_write(*pgtable) || !pte_dirty(*pgtable)))
goto fault_in_page;
- page = mem_map + mapnr;
- if ((mapnr >= max_mapnr) || PageReserved(page))
+ page = pte_page(*pgtable);
+ if ((!VALID_PAGE(page)) || PageReserved(page))
return 0;
flush_cache_page(vma, addr);
@@ -246,7+246,7 @@ static unsigned long __init free_all_bootmem_core(int nid, bootmem_data_t *bdata
* Now free the allocator bitmap itself, it's not
* needed anymore:
*/
- page = mem_map + MAP_NR(bdata->node_bootmem_map);
+ page = virt_to_page(bdata->node_bootmem_map);
count = 0;
for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
count++;
#include <linux/swap.h>
#include <linux/slab.h>
-unsigned long highmem_mapnr;
-
/*
* Take one locked page, return another low-memory locked page.
*/
@@ -61,7+59,7 @@ struct page * prepare_highmem_swapout(struct page * page)
* we stored its data into the new regular_page.
*/
page_cache_release(page);
- new_page = mem_map + MAP_NR(regular_page);
+ new_page = virt_to_page(regular_page);
LockPage(new_page);
return new_page;
}
@@ -210,7+210,7 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
do {
pte_t pte = *src_pte;
- unsigned long page_nr;
+ struct page *ptepage;
/* copy_one_pte */
@@ -221,9+221,9 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
set_pte(dst_pte, pte);
goto cont_copy_pte_range;
}
- page_nr = pte_pagenr(pte);
- if (page_nr >= max_mapnr ||
- PageReserved(mem_map+page_nr)) {
+ ptepage = pte_page(pte);
+ if ((!VALID_PAGE(ptepage)) ||
+ PageReserved(ptepage)) {
set_pte(dst_pte, pte);
goto cont_copy_pte_range;
}
@@ -236,7+236,7 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
if (vma->vm_flags & VM_SHARED)
pte = pte_mkclean(pte);
set_pte(dst_pte, pte_mkold(pte));
- get_page(mem_map + page_nr);
+ get_page(ptepage);
cont_copy_pte_range: address += PAGE_SIZE;
if (address >= end)
static inline int free_pte(pte_t page)
{
if (pte_present(page)) {
- unsigned long nr = pte_pagenr(page);
- if (nr >= max_mapnr || PageReserved(mem_map+nr))
+ struct page *ptpage = pte_page(page);
+ if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
return 0;
/*
* free_page() used to be able to clear swap cache
* entries. We may now have to do it manually.
*/
- free_page_and_swap_cache(mem_map+nr);
+ free_page_and_swap_cache(ptpage);
return 1;
}
swap_free(pte_to_swp_entry(page));
@@ -409,7+409,7 @@ static struct page * follow_page(unsigned long address)
static inline struct page * get_page_map(struct page *page)
{
- if (page > (mem_map + max_mapnr))
+ if (!VALID_PAGE(page))
return 0;
return page;
}
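[Note: VALID_PAGE() is also slightly stricter than the test it replaces
above: "page > mem_map + max_mapnr" still accepted the one-past-the-end
page, while "(page - mem_map) < max_mapnr" rejects it. Side by side, as a
sketch:]

    static int old_ok(struct page *page)   /* admits page == mem_map + max_mapnr */
    {
        return !(page > (mem_map + max_mapnr));
    }

    static int new_ok(struct page *page)   /* rejects it */
    {
        return VALID_PAGE(page);        /* (page - mem_map) < max_mapnr */
    }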
@@ -711,12+711,12 @@ static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
- unsigned long mapnr;
+ struct page *page;
pte_t oldpage = *pte;
pte_clear(pte);
- mapnr = MAP_NR(__va(phys_addr));
- if (mapnr >= max_mapnr || PageReserved(mem_map+mapnr))
+ page = virt_to_page(__va(phys_addr));
+ if ((!VALID_PAGE(page)) || PageReserved(page))
set_pte(pte, mk_pte_phys(phys_addr, prot));
forget_pte(oldpage);
address += PAGE_SIZE;
@@ -818,13+818,11 @@ static inline void break_cow(struct vm_area_struct * vma, struct page * old_page
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
unsigned long address, pte_t *page_table, pte_t pte)
{
- unsigned long map_nr;
struct page *old_page, *new_page;
- map_nr = pte_pagenr(pte);
- if (map_nr >= max_mapnr)
+ old_page = pte_page(pte);
+ if (!VALID_PAGE(old_page))
goto bad_wp_page;
- old_page = mem_map + map_nr;
/*
* We can avoid the copy if:
@@ -883,7+881,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
bad_wp_page:
spin_unlock(&mm->page_table_lock);
- printk("do_wp_page: bogus page at address %08lx (nr %ld)\n",address,map_nr);
+ printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",address,(unsigned long)old_page);
return -1;
}
@@ -920,7+918,7 @@ static void partial_clear(struct vm_area_struct *vma, unsigned long address)
return;
flush_cache_page(vma, address);
page = pte_page(pte);
- if ((page-mem_map >= max_mapnr) || PageReserved(page))
+ if ((!VALID_PAGE(page)) || PageReserved(page))
return;
offset = address & ~PAGE_MASK;
memclear_highpage_flush(page, offset, PAGE_SIZE - offset);
@@ -86,7+86,7 @@ static void __free_pages_ok (struct page *page, unsigned long order)
BUG();
if (page->mapping)
BUG();
- if (page-mem_map >= max_mapnr)
+ if (!VALID_PAGE(page))
BUG();
if (PageSwapCache(page))
BUG();
@@ -350,14+350,14 @@ void __free_pages(struct page *page, unsigned long order)
void free_pages(unsigned long addr, unsigned long order)
{
- unsigned long map_nr;
+ struct page *fpage;
#ifdef CONFIG_DISCONTIGMEM
if (addr == 0) return;
#endif
- map_nr = MAP_NR(addr);
- if (map_nr < max_mapnr)
- __free_pages(mem_map + map_nr, order);
+ fpage = virt_to_page(addr);
+ if (VALID_PAGE(fpage))
+ __free_pages(fpage, order);
}
/*
@@ -126,7+126,7 @@ void rw_swap_page(int rw, struct page *page, int wait)
*/
void rw_swap_page_nolock(int rw, swp_entry_t entry, char *buf, int wait)
{
- struct page *page = mem_map + MAP_NR(buf);
+ struct page *page = virt_to_page(buf);
if (!PageLocked(page))
PAGE_BUG(page);
@@ -496,7+496,7 @@ static inline void * kmem_getpages (kmem_cache_t *cachep, unsigned long flags)
static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
{
unsigned long i = (1<<cachep->gfporder);
- struct page *page = mem_map + MAP_NR(addr);
+ struct page *page = virt_to_page(addr);
/* free_pages() does not clear the type bit - we do that.
* The pages have been unlinked from their cache-slab,
@@ -1115,7+1115,7 @@ static int kmem_cache_grow (kmem_cache_t * cachep, int flags)
/* Nasty!!!!!! I hope this is OK. */
i = 1 << cachep->gfporder;
- page = mem_map + MAP_NR(objp);
+ page = virt_to_page(objp);
do {
SET_PAGE_CACHE(page, cachep);
SET_PAGE_SLAB(page, slabp);
@@ -1321,9+1321,9 @@ alloc_new_slab_nolock:
*/
#if DEBUG
-# define CHECK_NR(nr) \
+# define CHECK_NR(pg) \
do { \
- if (nr >= max_mapnr) { \
+ if (!VALID_PAGE(pg)) { \
printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
(unsigned long)objp); \
BUG(); \
@@ -1331,6+1331,7 @@ alloc_new_slab_nolock:
} while (0)
# define CHECK_PAGE(page) \
do { \
+ CHECK_NR(page); \
if (!PageSlab(page)) { \
printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
(unsigned long)objp); \
@@ -1339,23+1340,21 @@ alloc_new_slab_nolock:
} while (0)
#else
-# define CHECK_NR(nr) do { } while (0)
-# define CHECK_PAGE(nr) do { } while (0)
+# define CHECK_PAGE(pg) do { } while (0)
#endif
static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
{
slab_t* slabp;
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
+ CHECK_PAGE(virt_to_page(objp));
/* reduces memory footprint
*
if (OPTIMIZE(cachep))
slabp = (void*)((unsigned long)objp&(~(PAGE_SIZE-1)));
else
*/
- slabp = GET_PAGE_SLAB(mem_map + MAP_NR(objp));
+ slabp = GET_PAGE_SLAB(virt_to_page(objp));
#if DEBUG
if (cachep->flags & SLAB_DEBUG_INITIAL)
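[Note: the ordering inside the folded CHECK_PAGE() matters: the old
CHECK_NR() range test runs first and is pure pointer arithmetic, so a wild
pointer hits BUG() before PageSlab() ever dereferences page->flags. The
effective sequence, as a sketch:]

    static void check_page_sketch(struct page *page)
    {
        if (!VALID_PAGE(page))  /* arithmetic only, no dereference */
            BUG();
        if (!PageSlab(page))    /* safe now: page points into mem_map */
            BUG();
    }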
@@ -1452,8+1451,7 @@ static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
#ifdef CONFIG_SMP
cpucache_t *cc = cc_data(cachep);
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
+ CHECK_PAGE(virt_to_page(objp));
if (cc) {
int batchcount;
if (cc->avail < cc->limit) {
@@ -1536,9+1534,8 @@ void kmem_cache_free (kmem_cache_t *cachep, void *objp)
{
unsigned long flags;
#if DEBUG
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
- if (cachep != GET_PAGE_CACHE(mem_map + MAP_NR(objp)))
+ CHECK_PAGE(virt_to_page(objp));
+ if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
BUG();
#endif
@@ -1562,9+1559,8 @@ void kfree (const void *objp)
if (!objp)
return;
local_irq_save(flags);
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
- c = GET_PAGE_CACHE(mem_map + MAP_NR(objp));
+ CHECK_PAGE(virt_to_page(objp));
+ c = GET_PAGE_CACHE(virt_to_page(objp));
__kmem_cache_free(c, (void*)objp);
local_irq_restore(flags);
}
@@ -220,7+220,7 @@ struct page * read_swap_cache_async(swp_entry_t entry, int wait)
new_page_addr = __get_free_page(GFP_USER);
if (!new_page_addr)
goto out_free_swap; /* Out of memory */
- new_page = mem_map + MAP_NR(new_page_addr);
+ new_page = virt_to_page(new_page_addr);
/*
* Check the swap cache again, in case we stalled above.
@@ -645,7+645,7 @@ asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
goto bad_swap;
}
- lock_page(mem_map + MAP_NR(swap_header));
+ lock_page(virt_to_page(swap_header));
rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header, 1);
if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
@@ -41,10+41,9 @@ static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned lo
if (pte_none(page))
continue;
if (pte_present(page)) {
- unsigned long map_nr = pte_pagenr(page);
- if ((map_nr < max_mapnr) &&
- (!PageReserved(mem_map + map_nr)))
- __free_page(mem_map + map_nr);
+ struct page *ptpage = pte_page(page);
+ if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
+ __free_page(ptpage);
continue;
}
printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
@@ -45,7+45,7 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
if (!pte_present(pte))
goto out_failed;
page = pte_page(pte);
- if ((page-mem_map >= max_mapnr) || PageReserved(page))
+ if ((!VALID_PAGE(page)) || PageReserved(page))
goto out_failed;
if (mm->swap_cnt)
@@ -1564,11+1564,11 @@ static void free_pg_vec(unsigned long *pg_vec, unsigned order, unsigned len)
for (i=0; i<len; i++) {
if (pg_vec[i]) {
- unsigned long map, mapend;
+ struct page *page, *pend;
- mapend = MAP_NR(pg_vec[i] + (PAGE_SIZE << order) - 1);
- for (map = MAP_NR(pg_vec[i]); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(pg_vec[i] + (PAGE_SIZE << order) - 1);
+ for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
+ mem_map_unreserve(page);
free_pages(pg_vec[i], order);
}
}
@@ -1616,14+1616,14 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
memset(pg_vec, 0, req->tp_block_nr*sizeof(unsigned long*));
for (i=0; i<req->tp_block_nr; i++) {
- unsigned long map, mapend;
+ struct page *page, *pend;
pg_vec[i] = __get_free_pages(GFP_KERNEL, order);
if (!pg_vec[i])
goto out_free_pgvec;
- mapend = MAP_NR(pg_vec[i] + (PAGE_SIZE << order) - 1);
- for (map = MAP_NR(pg_vec[i]); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(pg_vec[i] + (PAGE_SIZE << order) - 1);
+ for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
+ mem_map_reserve(page);
}
/* Page vector is allocated */