Diffstat (limited to 'arch/parisc/include/asm')
-rw-r--r--  arch/parisc/include/asm/cacheflush.h |  5
-rw-r--r--  arch/parisc/include/asm/pgtable.h    | 47
-rw-r--r--  arch/parisc/include/asm/uaccess.h    | 14
3 files changed, 33 insertions, 33 deletions
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 79f694f3ad9..f0e2784e7cc 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -140,7 +140,10 @@ static inline void *kmap(struct page *page)
return page_address(page);
}
-#define kunmap(page) kunmap_parisc(page_address(page))
+static inline void kunmap(struct page *page)
+{
+ kunmap_parisc(page_address(page));
+}
static inline void *kmap_atomic(struct page *page)
{
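
The cacheflush.h hunk above replaces the kunmap() macro with a static inline, which gives the compiler a real prototype to check arguments against and guarantees the argument is evaluated exactly once. A minimal user-space sketch of why that matters (do_unmap, kunmap_macro and kunmap_inline are illustrative stand-ins, not kernel API):

#include <stdio.h>

struct page { void *vaddr; };

static void do_unmap(void *addr)          /* stand-in for kunmap_parisc() */
{
    printf("unmapping %p\n", addr);
}

/* Macro form: the argument is only checked against whatever the
 * expansion happens to do with it, so type bugs can slip through. */
#define kunmap_macro(page) do_unmap(((struct page *)(page))->vaddr)

/* Inline-function form: the prototype makes the compiler reject a
 * call with anything other than a struct page pointer. */
static inline void kunmap_inline(struct page *page)
{
    do_unmap(page->vaddr);
}

int main(void)
{
    struct page pg = { .vaddr = &pg };
    void *not_a_page = &pg;

    kunmap_macro(not_a_page);  /* compiles silently: the cast hides the bug */
    kunmap_inline(&pg);        /* OK: type checked */
    /* kunmap_inline(not_a_page) would draw an incompatible-pointer warning */
    return 0;
}
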
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 7df49fad29f..1e40d7f86be 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -16,6 +16,8 @@
#include <asm/processor.h>
#include <asm/cache.h>
+extern spinlock_t pa_dbit_lock;
+
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
* memory. For the return value to be meaningful, ADDR must be >=
@@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
#define set_pte_at(mm, addr, ptep, pteval) \
do { \
+ unsigned long flags; \
+ spin_lock_irqsave(&pa_dbit_lock, flags); \
set_pte(ptep, pteval); \
purge_tlb_entries(mm, addr); \
+ spin_unlock_irqrestore(&pa_dbit_lock, flags); \
} while (0)
#endif /* !__ASSEMBLY__ */
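
The set_pte_at() change above puts the PTE store and the TLB purge inside one pa_dbit_lock critical section, so no other updater can observe the new PTE while a stale TLB entry still exists. A user-space pthread analogue of that discipline (a mutex models spin_lock_irqsave(), and the PTE and purge are plain stand-ins; this sketches the locking shape, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pa_dbit_lock = PTHREAD_MUTEX_INITIALIZER; /* models the spinlock */
static unsigned long pte;                                        /* models *ptep */

static void purge_tlb_entries(unsigned long addr)  /* stand-in for the real purge */
{
    printf("purged TLB entry for %#lx\n", addr);
}

/* Both the PTE store and the purge happen under one critical section,
 * mirroring spin_lock_irqsave()/spin_unlock_irqrestore() in the patch. */
static void set_pte_at(unsigned long addr, unsigned long pteval)
{
    pthread_mutex_lock(&pa_dbit_lock);
    pte = pteval;
    purge_tlb_entries(addr);
    pthread_mutex_unlock(&pa_dbit_lock);
}

int main(void)
{
    set_pte_at(0x1000, 0xdeadbeef);
    return 0;
}
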
@@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
-#ifdef CONFIG_SMP
+ pte_t pte;
+ unsigned long flags;
+
if (!pte_young(*ptep))
return 0;
- return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
-#else
- pte_t pte = *ptep;
- if (!pte_young(pte))
+
+ spin_lock_irqsave(&pa_dbit_lock, flags);
+ pte = *ptep;
+ if (!pte_young(pte)) {
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
return 0;
- set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+ }
+ set_pte(ptep, pte_mkold(pte));
+ purge_tlb_entries(vma->vm_mm, addr);
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
return 1;
-#endif
}
-extern spinlock_t pa_dbit_lock;
-
struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t old_pte;
+ unsigned long flags;
- spin_lock(&pa_dbit_lock);
+ spin_lock_irqsave(&pa_dbit_lock, flags);
old_pte = *ptep;
pte_clear(mm,addr,ptep);
- spin_unlock(&pa_dbit_lock);
+ purge_tlb_entries(mm, addr);
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
return old_pte;
}
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
-#ifdef CONFIG_SMP
- unsigned long new, old;
-
- do {
- old = pte_val(*ptep);
- new = pte_val(pte_wrprotect(__pte (old)));
- } while (cmpxchg((unsigned long *) ptep, old, new) != old);
+ unsigned long flags;
+ spin_lock_irqsave(&pa_dbit_lock, flags);
+ set_pte(ptep, pte_wrprotect(*ptep));
purge_tlb_entries(mm, addr);
-#else
- pte_t old_pte = *ptep;
- set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-#endif
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))
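
Across this hunk all three PTE helpers converge on one shape, replacing the old CONFIG_SMP cmpxchg loops: an optional unlocked fast-path test, a re-check and modify under pa_dbit_lock, a purge_tlb_entries() call, then unlock. A user-space sketch of the double-checked pattern ptep_test_and_clear_young() now uses (ACCESSED_BIT and the mutex are illustrative stand-ins for _PAGE_ACCESSED and pa_dbit_lock):

#include <pthread.h>

#define ACCESSED_BIT 0x1UL   /* illustrative stand-in for _PAGE_ACCESSED */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pte = ACCESSED_BIT;

static int test_and_clear_young(void)
{
    if (!(pte & ACCESSED_BIT))        /* unlocked fast path: already old */
        return 0;

    pthread_mutex_lock(&lock);
    if (!(pte & ACCESSED_BIT)) {      /* re-check: cleared while we waited */
        pthread_mutex_unlock(&lock);
        return 0;
    }
    pte &= ~ACCESSED_BIT;             /* clear under the lock, like set_pte() */
    /* purge_tlb_entries() would follow here in the kernel version */
    pthread_mutex_unlock(&lock);
    return 1;
}

int main(void)
{
    return !test_and_clear_young();   /* first call clears the bit, returns 1 */
}
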
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 4ba2c93770f..e0a82358517 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -181,30 +181,24 @@ struct exception_data {
#if !defined(CONFIG_64BIT)
#define __put_kernel_asm64(__val,ptr) do { \
- u64 __val64 = (u64)(__val); \
- u32 hi = (__val64) >> 32; \
- u32 lo = (__val64) & 0xffffffff; \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%1)" \
- "\n2:\tstw %3,4(%1)\n\t" \
+ "\n2:\tstw %R2,4(%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
: "=r"(__pu_err) \
- : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+ : "r"(ptr), "r"(__val), "0"(__pu_err) \
: "r1"); \
} while (0)
#define __put_user_asm64(__val,ptr) do { \
- u64 __val64 = (u64)(__val); \
- u32 hi = (__val64) >> 32; \
- u32 lo = (__val64) & 0xffffffff; \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%%sr3,%1)" \
- "\n2:\tstw %3,4(%%sr3,%1)\n\t" \
+ "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
: "=r"(__pu_err) \
- : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+ : "r"(ptr), "r"(__val), "0"(__pu_err) \
: "r1"); \
} while (0)
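
On 32-bit parisc a u64 operand occupies a register pair, and GCC's PA-RISC %R operand modifier names the second register of that pair, so the asm above can take the 64-bit value as a single "r"(__val) input instead of splitting it into hi/lo by hand. A portable C sketch of the split the removed code performed (parisc is big-endian, so the high word lands at offset 0, matching the two stw instructions):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t val = 0x1122334455667788ULL;

    /* What the deleted code computed explicitly... */
    uint32_t hi = (uint32_t)(val >> 32);        /* stored by "stw %2,0(%1)" */
    uint32_t lo = (uint32_t)(val & 0xffffffff); /* stored by "stw %3,4(%1)" */

    /* ...and what %2 / %R2 now name implicitly: the two halves of the
     * register pair holding the single 64-bit "r"(__val) operand. */
    printf("hi=%08x lo=%08x\n", (unsigned)hi, (unsigned)lo);
    return 0;
}
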