author     Dave McCracken <dmccr@us.ibm.com>      2006-09-25 23:31:48 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-09-26 08:48:51 -0700
commit     46a82b2d5591335277ed2930611f6acb4ce654ed (patch)
tree       e90bc1843701af2012bae92564f7109027a8244f /arch
parent     d2e7b7d0aa021847c59f882b066e7d3812902870 (diff)
[PATCH] Standardize pxx_page macros
One of the changes necessary for shared page tables is to standardize the pxx_page macros.  pte_page and pmd_page have always returned the struct page associated with their entry, while pte_page_kernel and pmd_page_kernel have returned the kernel virtual address.  pud_page and pgd_page, on the other hand, return the kernel virtual address.

Shared page tables need pud_page and pgd_page to return the actual page structures.  There are very few actual users of these functions, so it is simple to standardize their usage.

Since this is basic cleanup, I am submitting these changes as a standalone patch.  Per Hugh Dickins' comments, I am also renaming the pxx_page_kernel macros to pxx_page_vaddr to clarify their meaning.

Signed-off-by: Dave McCracken <dmccr@us.ibm.com>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
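For readers unfamiliar with these accessors, a minimal sketch of the naming convention the patch settles on.  The definitions below are illustrative only, loosely modeled on the x86_64 versions of the era; each architecture supplies its own, with different masks and conversion helpers:

/*
 * After this patch, for each level pxx (pte/pmd/pud/pgd):
 *   pxx_page(e)       - the struct page backing the table that
 *                       entry e points to
 *   pxx_page_vaddr(e) - the kernel virtual address of that table
 *
 * Illustrative definitions, assuming the page tables live in the
 * kernel direct map (not taken verbatim from any architecture):
 */
#define pmd_page_vaddr(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) \
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

Callers that walk tables in software want the vaddr form (e.g. pte_offset_kernel() indexes into pmd_page_vaddr(*pmd)), while callers that manage a table's lifetime want the struct page from pmd_page().  Before this patch, pud_page and pgd_page returned a virtual address despite their names; the rename untangles that.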
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mm/ioremap.c      2
-rw-r--r--  arch/sh/mm/cache-sh7705.c  2
-rw-r--r--  arch/sparc/mm/srmmu.c      2
-rw-r--r--  arch/sparc/mm/sun4c.c      2
-rw-r--r--  arch/um/kernel/skas/mmu.c  2
-rw-r--r--  arch/x86_64/mm/fault.c     6
6 files changed, 8 insertions, 8 deletions
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 88a999df0ab..591fc3187c7 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -177,7 +177,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
* Free the page table, if there was one.
*/
if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
- pte_free_kernel(pmd_page_kernel(pmd));
+ pte_free_kernel(pmd_page_vaddr(pmd));
}
addr += PGDIR_SIZE;
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index ad8ed7d41e1..bf94eedb0a8 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -30,7 +30,7 @@
#define __pte_offset(address) \
((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
+#define pte_offset(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
__pte_offset(address))
static inline void cache_wback_all(void)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 16e13f663ab..b27a506309e 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -2175,7 +2175,7 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);
BTFIXUPSET_SETHI(none_mask, 0xF0000000);
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index 7fdddf3c7e1..436021ceb2e 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -2280,5 +2280,5 @@ void __init ld_mmu_sun4c(void)
/* These should _never_ get called with two level tables. */
BTFIXUPSET_CALL(pgd_set, sun4c_pgd_set, BTFIXUPCALL_NOP);
- BTFIXUPSET_CALL(pgd_page, sun4c_pgd_page, BTFIXUPCALL_RETO0);
+ BTFIXUPSET_CALL(pgd_page_vaddr, sun4c_pgd_page, BTFIXUPCALL_RETO0);
}
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 624ca238d1f..79c22707a63 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -55,7 +55,7 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
* destroy_context_skas.
*/
- mm->context.skas.last_page_table = pmd_page_kernel(*pmd);
+ mm->context.skas.last_page_table = pmd_page_vaddr(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index ac8ea66ccb9..4198798e146 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -299,7 +299,7 @@ static int vmalloc_fault(unsigned long address)
if (pgd_none(*pgd))
set_pgd(pgd, *pgd_ref);
else
- BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
/* Below here mismatches are bugs because these lower tables
are shared */
@@ -308,7 +308,7 @@ static int vmalloc_fault(unsigned long address)
pud_ref = pud_offset(pgd_ref, address);
if (pud_none(*pud_ref))
return -1;
- if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
+ if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
BUG();
pmd = pmd_offset(pud, address);
pmd_ref = pmd_offset(pud_ref, address);
@@ -641,7 +641,7 @@ void vmalloc_sync_all(void)
if (pgd_none(*pgd))
set_pgd(pgd, *pgd_ref);
else
- BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
}
spin_unlock(&pgd_lock);
set_bit(pgd_index(address), insync);