author     Linus Torvalds <torvalds@g5.osdl.org>  2006-02-07 10:10:09 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-02-07 10:10:09 -0800
commit     e8a82fd030c2e0ba4d4169cc4257ef174ff6f6b4 (patch)
tree       0515ffe68d9a88eda2d16f45e6391b05475fd9d1 /include
parent     e1a8e6c9b757c09249ab29fc6da12d9ab64567e1 (diff)
parent     b38c73995f63fe205c0068cb0ce3131895244068 (diff)
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
Diffstat (limited to 'include')
-rw-r--r--  include/asm-mips/bitops.h | 58
-rw-r--r--  include/asm-mips/byteorder.h | 29
-rw-r--r--  include/asm-mips/cacheflush.h | 3
-rw-r--r--  include/asm-mips/hazards.h | 2
-rw-r--r--  include/asm-mips/interrupt.h | 27
-rw-r--r--  include/asm-mips/mach-au1x00/au1000.h | 4
-rw-r--r--  include/asm-mips/mach-cobalt/cobalt.h (renamed from include/asm-mips/cobalt/cobalt.h) | 0
-rw-r--r--  include/asm-mips/mach-cobalt/cpu-feature-overrides.h | 56
-rw-r--r--  include/asm-mips/mach-cobalt/mach-gt64120.h (renamed from include/asm-mips/cobalt/mach-gt64120.h) | 0
-rw-r--r--  include/asm-mips/mach-ip32/cpu-feature-overrides.h | 2
-rw-r--r--  include/asm-mips/r4kcache.h | 400
-rw-r--r--  include/asm-mips/reboot.h | 3
-rw-r--r--  include/asm-mips/string.h | 22
-rw-r--r--  include/asm-mips/tx4927/tx4927.h | 21
-rw-r--r--  include/asm-mips/tx4927/tx4927_pci.h | 10
-rw-r--r--  include/asm-mips/uaccess.h | 71
16 files changed, 250 insertions(+), 458 deletions(-)
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 3b0c8aaf6e8b..8e802059fe67 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -644,20 +644,26 @@ static inline unsigned long ffz(unsigned long word)
}
/*
- * flz - find last zero in word.
+ * fls - find last bit set.
* @word: The word to search
*
- * Returns 0..SZLONG-1
- * Undefined if no zero exists, so code should check against ~0UL first.
+ * Returns 1..SZLONG
+ * Returns 0 if no bit exists
*/
-static inline unsigned long flz(unsigned long word)
+static inline unsigned long fls(unsigned long word)
{
-#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
- return __ilog2(~word);
-#else
#ifdef CONFIG_32BIT
- int r = 31, s;
- word = ~word;
+#ifdef CONFIG_CPU_MIPS32
+ __asm__ ("clz %0, %1" : "=r" (word) : "r" (word));
+
+ return 32 - word;
+#else
+ {
+ int r = 32, s;
+
+ if (word == 0)
+ return 0;
+
s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s;
s = 8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s;
s = 4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s;
@@ -665,10 +671,23 @@ static inline unsigned long flz(unsigned long word)
s = 1; if ((word & 0x80000000)) s = 0; r -= s;
return r;
+ }
#endif
+#endif /* CONFIG_32BIT */
+
#ifdef CONFIG_64BIT
- int r = 63, s;
- word = ~word;
+#ifdef CONFIG_CPU_MIPS64
+
+ __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));
+
+ return 64 - word;
+#else
+ {
+ int r = 64, s;
+
+ if (word == 0)
+ return 0;
+
s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s;
s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s;
s = 8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s;
@@ -677,24 +696,11 @@ static inline unsigned long flz(unsigned long word)
s = 1; if ((word & 0x8000000000000000UL)) s = 0; r -= s;
return r;
+ }
#endif
-#endif
+#endif /* CONFIG_64BIT */
}
-/*
- * fls - find last bit set.
- * @word: The word to search
- *
- * Returns 1..SZLONG
- * Returns 0 if no bit exists
- */
-static inline unsigned long fls(unsigned long word)
-{
- if (word == 0)
- return 0;
-
- return flz(~word) + 1;
-}
#define fls64(x) generic_fls64(x)
/*
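
As a quick illustration of the two fls() strategies in the hunk above, here is a minimal
host-testable sketch of the binary-search fallback, cross-checked against the clz path
(modelled with GCC's __builtin_clz). Names are illustrative, not kernel API.

#include <assert.h>
#include <stdint.h>

static unsigned long fls32_generic(uint32_t word)
{
	int r = 32, s;

	if (word == 0)
		return 0;

	s = 16; if (word & 0xffff0000u) s = 0; r -= s; word <<= s;
	s =  8; if (word & 0xff000000u) s = 0; r -= s; word <<= s;
	s =  4; if (word & 0xf0000000u) s = 0; r -= s; word <<= s;
	s =  2; if (word & 0xc0000000u) s = 0; r -= s; word <<= s;
	s =  1; if (word & 0x80000000u) s = 0; r -= s;

	return r;
}

int main(void)
{
	uint32_t tests[] = { 0, 1, 2, 0x8000, 0x80000000u, 0xffffffffu };
	unsigned int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		uint32_t x = tests[i];
		/* the MIPS32 path is "clz %0, %1; return 32 - word", i.e. exactly this */
		unsigned long clz_fls = x ? 32 - __builtin_clz(x) : 0;

		assert(fls32_generic(x) == clz_fls);
	}
	return 0;
}
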
diff --git a/include/asm-mips/byteorder.h b/include/asm-mips/byteorder.h
index d1fe9e5c62e4..584f8128fffd 100644
--- a/include/asm-mips/byteorder.h
+++ b/include/asm-mips/byteorder.h
@@ -8,10 +8,39 @@
#ifndef _ASM_BYTEORDER_H
#define _ASM_BYTEORDER_H
+#include <linux/config.h>
+#include <linux/compiler.h>
#include <asm/types.h>
#ifdef __GNUC__
+#ifdef CONFIG_CPU_MIPSR2
+
+static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
+{
+ __asm__(
+ " wsbh %0, %1 \n"
+ : "=r" (x)
+ : "r" (x));
+
+ return x;
+}
+#define __arch__swab16(x) ___arch__swab16(x)
+
+static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+{
+ __asm__(
+ " wsbh %0, %1 \n"
+ " rotr %0, %0, 16 \n"
+ : "=r" (x)
+ : "r" (x));
+
+ return x;
+}
+#define __arch__swab32(x) ___arch__swab32(x)
+
+#endif /* CONFIG_CPU_MIPSR2 */
+
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
# define __SWAB_64_THRU_32__
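
The new MIPSR2 swab32 above pairs wsbh (swap bytes within each 16-bit half) with rotr 16
(swap the two halves). A host-side sketch of the same computation in portable C, for
illustration only:

#include <assert.h>
#include <stdint.h>

static uint32_t swab32_sketch(uint32_t x)
{
	x = ((x & 0x00ff00ffu) << 8) | ((x & 0xff00ff00u) >> 8);	/* wsbh    */
	return (x << 16) | (x >> 16);					/* rotr 16 */
}

int main(void)
{
	assert(swab32_sketch(0x12345678u) == 0x78563412u);
	return 0;
}
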
diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h
index a18ba2edc0b6..aeae9fabf4a9 100644
--- a/include/asm-mips/cacheflush.h
+++ b/include/asm-mips/cacheflush.h
@@ -49,8 +49,7 @@ static inline void flush_dcache_page(struct page *page)
extern void (*flush_icache_page)(struct vm_area_struct *vma,
struct page *page);
-extern void (*flush_icache_range)(unsigned long __user start,
- unsigned long __user end);
+extern void (*flush_icache_range)(unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index 2fc90632f88c..6111a0ce58c4 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -100,7 +100,7 @@
__asm__(
" .macro _ssnop \n\t"
- " sll $0, $2, 1 \n\t"
+ " sll $0, $0, 1 \n\t"
" .endm \n\t"
" \n\t"
" .macro _ehb \n\t"
diff --git a/include/asm-mips/interrupt.h b/include/asm-mips/interrupt.h
index abdf54ee64cf..774348734fa0 100644
--- a/include/asm-mips/interrupt.h
+++ b/include/asm-mips/interrupt.h
@@ -47,6 +47,17 @@ static inline void local_irq_enable(void)
* R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
* no nops at all.
*/
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
__asm__ (
" .macro local_irq_disable\n"
" .set push \n"
@@ -55,8 +66,8 @@ __asm__ (
" di \n"
#else
" mfc0 $1,$12 \n"
- " ori $1,1 \n"
- " xori $1,1 \n"
+ " ori $1,0x1f \n"
+ " xori $1,0x1f \n"
" .set noreorder \n"
" mtc0 $1,$12 \n"
#endif
@@ -96,8 +107,8 @@ __asm__ (
" andi \\result, 1 \n"
#else
" mfc0 \\result, $12 \n"
- " ori $1, \\result, 1 \n"
- " xori $1, 1 \n"
+ " ori $1, \\result, 0x1f \n"
+ " xori $1, 0x1f \n"
" .set noreorder \n"
" mtc0 $1, $12 \n"
#endif
@@ -114,6 +125,7 @@ __asm__ __volatile__( \
__asm__ (
" .macro local_irq_restore flags \n"
+ " .set push \n"
" .set noreorder \n"
" .set noat \n"
#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
@@ -135,14 +147,13 @@ __asm__ (
#else
" mfc0 $1, $12 \n"
" andi \\flags, 1 \n"
- " ori $1, 1 \n"
- " xori $1, 1 \n"
+ " ori $1, 0x1f \n"
+ " xori $1, 0x1f \n"
" or \\flags, $1 \n"
" mtc0 \\flags, $12 \n"
#endif
" irq_disable_hazard \n"
- " .set at \n"
- " .set reorder \n"
+ " .set pop \n"
" .endm \n");
#define local_irq_restore(flags) \
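
A small host-side sketch of what the ori/xori change above buys: "ori x, m; xori x, m"
clears exactly the bits in m, so widening the mask from 1 to 0x1f clears EXL/ERL/KSU as
well as IE, masking the spurious EXL the TX49 errata can leave in the mfc0 result. Bit
positions follow the MIPS Status register; the names below are illustrative only.

#include <assert.h>
#include <stdint.h>

#define ST_IE	0x00000001u
#define ST_EXL	0x00000002u

static uint32_t ori_xori(uint32_t status, uint32_t mask)
{
	return (status | mask) ^ mask;		/* clears the bits in mask */
}

int main(void)
{
	uint32_t bogus = 0x10000401u | ST_EXL;	/* mfc0 result with spurious EXL */

	assert(ori_xori(bogus, 0x01) & ST_EXL);		/* old mask keeps the bogus EXL */
	assert(!(ori_xori(bogus, 0x1f) & ST_EXL));	/* new mask clears it */
	return 0;
}
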
diff --git a/include/asm-mips/mach-au1x00/au1000.h b/include/asm-mips/mach-au1x00/au1000.h
index 8e1d7ed7d8e3..4686e17c206c 100644
--- a/include/asm-mips/mach-au1x00/au1000.h
+++ b/include/asm-mips/mach-au1x00/au1000.h
@@ -1198,7 +1198,11 @@ extern au1xxx_irq_map_t au1xxx_irq_map[];
/* UARTS 0-3 */
#define UART_BASE UART0_ADDR
+#ifdef CONFIG_SOC_AU1200
+#define UART_DEBUG_BASE UART1_ADDR
+#else
#define UART_DEBUG_BASE UART3_ADDR
+#endif
#define UART_RX 0 /* Receive buffer */
#define UART_TX 4 /* Transmit buffer */
diff --git a/include/asm-mips/cobalt/cobalt.h b/include/asm-mips/mach-cobalt/cobalt.h
index 78e1df2095fb..78e1df2095fb 100644
--- a/include/asm-mips/cobalt/cobalt.h
+++ b/include/asm-mips/mach-cobalt/cobalt.h
diff --git a/include/asm-mips/mach-cobalt/cpu-feature-overrides.h b/include/asm-mips/mach-cobalt/cpu-feature-overrides.h
new file mode 100644
index 000000000000..ace8c5ef9701
--- /dev/null
+++ b/include/asm-mips/mach-cobalt/cpu-feature-overrides.h
@@ -0,0 +1,56 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_COBALT_CPU_FEATURE_OVERRIDES_H
+#define __ASM_COBALT_CPU_FEATURE_OVERRIDES_H
+
+#include <linux/config.h>
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 1
+#define cpu_has_32fpr 1
+#define cpu_has_counter 1
+#define cpu_has_watch 0
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 0
+#define cpu_has_mcheck 0
+#define cpu_has_ejtag 0
+
+#define cpu_has_subset_pcaches 0
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+#define cpu_scache_line_size() 0
+
+#ifdef CONFIG_64BIT
+#define cpu_has_llsc 0
+#else
+#define cpu_has_llsc 1
+#endif
+
+#define cpu_has_mips16 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_icache_snoops_remote_store 0
+#define cpu_has_dsp 0
+
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#endif /* __ASM_COBALT_CPU_FEATURE_OVERRIDES_H */
diff --git a/include/asm-mips/cobalt/mach-gt64120.h b/include/asm-mips/mach-cobalt/mach-gt64120.h
index 587fc4378f44..587fc4378f44 100644
--- a/include/asm-mips/cobalt/mach-gt64120.h
+++ b/include/asm-mips/mach-cobalt/mach-gt64120.h
diff --git a/include/asm-mips/mach-ip32/cpu-feature-overrides.h b/include/asm-mips/mach-ip32/cpu-feature-overrides.h
index b80c30725cf6..36070b5654ab 100644
--- a/include/asm-mips/mach-ip32/cpu-feature-overrides.h
+++ b/include/asm-mips/mach-ip32/cpu-feature-overrides.h
@@ -18,7 +18,7 @@
* so, for 64bit IP32 kernel we just don't use ll/sc.
* This does not affect luserland.
*/
-#if defined(CONFIG_CPU_R5000) && defined(CONFIG_64BIT)
+#if (defined(CONFIG_CPU_R5000) || defined(CONFIG_CPU_NEVADA)) && defined(CONFIG_64BIT)
#define cpu_has_llsc 0
#else
#define cpu_has_llsc 1
diff --git a/include/asm-mips/r4kcache.h b/include/asm-mips/r4kcache.h
index a5ea9d828aee..cc53196efa40 100644
--- a/include/asm-mips/r4kcache.h
+++ b/include/asm-mips/r4kcache.h
@@ -166,123 +166,6 @@ static inline void invalidate_tcache_page(unsigned long addr)
: "r" (base), \
"i" (op));
-static inline void blast_dcache16(void)
-{
- unsigned long start = INDEX_BASE;
- unsigned long end = start + current_cpu_data.dcache.waysize;
- unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
- unsigned long ws_end = current_cpu_data.dcache.ways <<
- current_cpu_data.dcache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x200)
- cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
-}
-
-static inline void blast_dcache16_page(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
-
- do {
- cache16_unroll32(start,Hit_Writeback_Inv_D);
- start += 0x200;
- } while (start < end);
-}
-
-static inline void blast_dcache16_page_indexed(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
- unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
- unsigned long ws_end = current_cpu_data.dcache.ways <<
- current_cpu_data.dcache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x200)
- cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
-}
-
-static inline void blast_icache16(void)
-{
- unsigned long start = INDEX_BASE;
- unsigned long end = start + current_cpu_data.icache.waysize;
- unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
- unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x200)
- cache16_unroll32(addr|ws,Index_Invalidate_I);
-}
-
-static inline void blast_icache16_page(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
-
- do {
- cache16_unroll32(start,Hit_Invalidate_I);
- start += 0x200;
- } while (start < end);
-}
-
-static inline void blast_icache16_page_indexed(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
- unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
- unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x200)
- cache16_unroll32(addr|ws,Index_Invalidate_I);
-}
-
-static inline void blast_scache16(void)
-{
- unsigned long start = INDEX_BASE;
- unsigned long end = start + current_cpu_data.scache.waysize;
- unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
- unsigned long ws_end = current_cpu_data.scache.ways <<
- current_cpu_data.scache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x200)
- cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
-static inline void blast_scache16_page(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = page + PAGE_SIZE;
-
- do {
- cache16_unroll32(start,Hit_Writeback_Inv_SD);
- start += 0x200;
- } while (start < end);
-}
-
-static inline void blast_scache16_page_indexed(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
- unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
- unsigned long ws_end = current_cpu_data.scache.ways <<
- current_cpu_data.scache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x200)
- cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
#define cache32_unroll32(base,op) \
__asm__ __volatile__( \
" .set push \n" \
@@ -309,123 +192,6 @@ static inline void blast_scache16_page_indexed(unsigned long page)
: "r" (base), \
"i" (op));
-static inline void blast_dcache32(void)
-{
- unsigned long start = INDEX_BASE;
- unsigned long end = start + current_cpu_data.dcache.waysize;
- unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
- unsigned long ws_end = current_cpu_data.dcache.ways <<
- current_cpu_data.dcache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x400)
- cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
-}
-
-static inline void blast_dcache32_page(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
-
- do {
- cache32_unroll32(start,Hit_Writeback_Inv_D);
- start += 0x400;
- } while (start < end);
-}
-
-static inline void blast_dcache32_page_indexed(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
- unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
- unsigned long ws_end = current_cpu_data.dcache.ways <<
- current_cpu_data.dcache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x400)
- cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
-}
-
-static inline void blast_icache32(void)
-{
- unsigned long start = INDEX_BASE;
- unsigned long end = start + current_cpu_data.icache.waysize;
- unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
- unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x400)
- cache32_unroll32(addr|ws,Index_Invalidate_I);
-}
-
-static inline void blast_icache32_page(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
-
- do {
- cache32_unroll32(start,Hit_Invalidate_I);
- start += 0x400;
- } while (start < end);
-}
-
-static inline void blast_icache32_page_indexed(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
- unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
- unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x400)
- cache32_unroll32(addr|ws,Index_Invalidate_I);
-}
-
-static inline void blast_scache32(void)
-{
- unsigned long start = INDEX_BASE;
- unsigned long end = start + current_cpu_data.scache.waysize;
- unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
- unsigned long ws_end = current_cpu_data.scache.ways <<
- current_cpu_data.scache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x400)
- cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
-static inline void blast_scache32_page(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = page + PAGE_SIZE;
-
- do {
- cache32_unroll32(start,Hit_Writeback_Inv_SD);
- start += 0x400;
- } while (start < end);
-}
-
-static inline void blast_scache32_page_indexed(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
- unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
- unsigned long ws_end = current_cpu_data.scache.ways <<
- current_cpu_data.scache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x400)
- cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
#define cache64_unroll32(base,op) \
__asm__ __volatile__( \
" .set push \n" \
@@ -452,84 +218,6 @@ static inline void blast_scache32_page_indexed(unsigned long page)
: "r" (base), \
"i" (op));
-static inline void blast_icache64(void)
-{
- unsigned long start = INDEX_BASE;
- unsigned long end = start + current_cpu_data.icache.waysize;
- unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
- unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x800)
- cache64_unroll32(addr|ws,Index_Invalidate_I);
-}
-
-static inline void blast_icache64_page(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
-
- do {
- cache64_unroll32(start,Hit_Invalidate_I);
- start += 0x800;
- } while (start < end);
-}
-
-static inline void blast_icache64_page_indexed(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
- unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
- unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x800)
- cache64_unroll32(addr|ws,Index_Invalidate_I);
-}
-
-static inline void blast_scache64(void)
-{
- unsigned long start = INDEX_BASE;
- unsigned long end = start + current_cpu_data.scache.waysize;
- unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
- unsigned long ws_end = current_cpu_data.scache.ways <<
- current_cpu_data.scache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x800)
- cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
-static inline void blast_scache64_page(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = page + PAGE_SIZE;
-
- do {
- cache64_unroll32(start,Hit_Writeback_Inv_SD);
- start += 0x800;
- } while (start < end);
-}
-
-static inline void blast_scache64_page_indexed(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
- unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
- unsigned long ws_end = current_cpu_data.scache.ways <<
- current_cpu_data.scache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x800)
- cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
#define cache128_unroll32(base,op) \
__asm__ __volatile__( \
" .set push \n" \
@@ -556,43 +244,55 @@ static inline void blast_scache64_page_indexed(unsigned long page)
: "r" (base), \
"i" (op));
-static inline void blast_scache128(void)
-{
- unsigned long start = INDEX_BASE;
- unsigned long end = start + current_cpu_data.scache.waysize;
- unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
- unsigned long ws_end = current_cpu_data.scache.ways <<
- current_cpu_data.scache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x1000)
- cache128_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
-static inline void blast_scache128_page(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = page + PAGE_SIZE;
-
- do {
- cache128_unroll32(start,Hit_Writeback_Inv_SD);
- start += 0x1000;
- } while (start < end);
-}
-
-static inline void blast_scache128_page_indexed(unsigned long page)
-{
- unsigned long start = page;
- unsigned long end = start + PAGE_SIZE;
- unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
- unsigned long ws_end = current_cpu_data.scache.ways <<
- current_cpu_data.scache.waybit;
- unsigned long ws, addr;
-
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x1000)
- cache128_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
+/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
+#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
+static inline void blast_##pfx##cache##lsize(void) \
+{ \
+ unsigned long start = INDEX_BASE; \
+ unsigned long end = start + current_cpu_data.desc.waysize; \
+ unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
+ unsigned long ws_end = current_cpu_data.desc.ways << \
+ current_cpu_data.desc.waybit; \
+ unsigned long ws, addr; \
+ \
+ for (ws = 0; ws < ws_end; ws += ws_inc) \
+ for (addr = start; addr < end; addr += lsize * 32) \
+ cache##lsize##_unroll32(addr|ws,indexop); \
+} \
+ \
+static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
+{ \
+ unsigned long start = page; \
+ unsigned long end = page + PAGE_SIZE; \
+ \
+ do { \
+ cache##lsize##_unroll32(start,hitop); \
+ start += lsize * 32; \
+ } while (start < end); \
+} \
+ \
+static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
+{ \
+ unsigned long start = page; \
+ unsigned long end = start + PAGE_SIZE; \
+ unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
+ unsigned long ws_end = current_cpu_data.desc.ways << \
+ current_cpu_data.desc.waybit; \
+ unsigned long ws, addr; \
+ \
+ for (ws = 0; ws < ws_end; ws += ws_inc) \
+ for (addr = start; addr < end; addr += lsize * 32) \
+ cache##lsize##_unroll32(addr|ws,indexop); \
+}
+
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
#endif /* _ASM_R4KCACHE_H */
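
The hunk above replaces nine hand-written blast_* families with one generator macro. A
stand-alone toy sketch of the same idiom, printing instead of issuing cache ops and
skipping the 32-way unroll; all names here are illustrative:

#include <stdio.h>

#define BUILD_TOUCH(pfx, lsize)						\
static void touch_##pfx##cache##lsize(unsigned long start,		\
				      unsigned long end)		\
{									\
	unsigned long addr;						\
									\
	for (addr = start; addr < end; addr += (lsize))			\
		printf(#pfx "cache line op at %#lx (%d-byte lines)\n",	\
		       addr, (lsize));					\
}

BUILD_TOUCH(d, 16)	/* generates touch_dcache16() */
BUILD_TOUCH(i, 32)	/* generates touch_icache32() */

int main(void)
{
	touch_dcache16(0, 64);	/* four 16-byte "lines" */
	touch_icache32(0, 64);	/* two 32-byte "lines"  */
	return 0;
}
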
diff --git a/include/asm-mips/reboot.h b/include/asm-mips/reboot.h
index 2f10ebcbe141..e48c0bfab257 100644
--- a/include/asm-mips/reboot.h
+++ b/include/asm-mips/reboot.h
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1997, 1999, 2001 by Ralf Baechle
+ * Copyright (C) 1997, 1999, 2001, 06 by Ralf Baechle
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
#ifndef _ASM_REBOOT_H
@@ -11,6 +11,5 @@
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
-extern void (*_machine_power_off)(void);
#endif /* _ASM_REBOOT_H */
diff --git a/include/asm-mips/string.h b/include/asm-mips/string.h
index 5a06f6d13899..907da600fddd 100644
--- a/include/asm-mips/string.h
+++ b/include/asm-mips/string.h
@@ -141,26 +141,4 @@ extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
-#ifdef CONFIG_32BIT
-#define __HAVE_ARCH_MEMSCAN
-static __inline__ void *memscan(void *__addr, int __c, size_t __size)
-{
- char *__end = (char *)__addr + __size;
- unsigned char __uc = (unsigned char) __c;
-
- __asm__(".set\tpush\n\t"
- ".set\tnoat\n\t"
- ".set\treorder\n\t"
- "1:\tbeq\t%0,%1,2f\n\t"
- "addiu\t%0,1\n\t"
- "lbu\t$1,-1(%0)\n\t"
- "bne\t$1,%z4,1b\n"
- "2:\t.set\tpop"
- : "=r" (__addr), "=r" (__end)
- : "0" (__addr), "1" (__end), "Jr" (__uc));
-
- return __addr;
-}
-#endif /* CONFIG_32BIT */
-
#endif /* _ASM_STRING_H */
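
With the assembly memscan() removed above, 32-bit MIPS falls back to the generic C
version. A self-contained sketch of those semantics (return the first match, or one past
the end if the byte is absent):

#include <assert.h>
#include <stddef.h>

static void *memscan_sketch(void *addr, int c, size_t size)
{
	unsigned char *p = addr;

	while (size) {
		if (*p == (unsigned char)c)
			return p;
		p++;
		size--;
	}
	return p;		/* not found: one past the area */
}

int main(void)
{
	char buf[] = "abcdef";

	assert(memscan_sketch(buf, 'd', 6) == buf + 3);
	assert(memscan_sketch(buf, 'z', 6) == buf + 6);
	return 0;
}
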
diff --git a/include/asm-mips/tx4927/tx4927.h b/include/asm-mips/tx4927/tx4927.h
index 3bb7f0087d68..de85bd2245f7 100644
--- a/include/asm-mips/tx4927/tx4927.h
+++ b/include/asm-mips/tx4927/tx4927.h
@@ -2,7 +2,7 @@
* Author: MontaVista Software, Inc.
* source@mvista.com
*
- * Copyright 2001-2002 MontaVista Software Inc.
+ * Copyright 2001-2006 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -30,10 +30,10 @@
#include <asm/tx4927/tx4927_mips.h>
/*
- This register naming came from the intergrate cpu/controoler name TX4927
+ This register naming came from the integrated CPU/controller name TX4927
followed by the device name from table 4.2.2 on page 4-3 and then followed
by the register name from table 4.2.3 on pages 4-4 to 4-8. The manaul
- used is "TMPR4927BT Preliminary Rev 0.1 20.Jul.2001".
+ used was "TMPR4927BT Preliminary Rev 0.1 20.Jul.2001".
*/
#define TX4927_SIO_0_BASE
@@ -251,8 +251,8 @@
/* TX4927 Timer 0 (32-bit registers) */
#define TX4927_TMR0_BASE 0xf000
-#define TX4927_TMR0_TMTCR0 0xf004
-#define TX4927_TMR0_TMTISR0 0xf008
+#define TX4927_TMR0_TMTCR0 0xf000
+#define TX4927_TMR0_TMTISR0 0xf004
#define TX4927_TMR0_TMCPRA0 0xf008
#define TX4927_TMR0_TMCPRB0 0xf00c
#define TX4927_TMR0_TMITMR0 0xf010
@@ -264,8 +264,8 @@
/* TX4927 Timer 1 (32-bit registers) */
#define TX4927_TMR1_BASE 0xf100
-#define TX4927_TMR1_TMTCR1 0xf104
-#define TX4927_TMR1_TMTISR1 0xf108
+#define TX4927_TMR1_TMTCR1 0xf100
+#define TX4927_TMR1_TMTISR1 0xf104
#define TX4927_TMR1_TMCPRA1 0xf108
#define TX4927_TMR1_TMCPRB1 0xf10c
#define TX4927_TMR1_TMITMR1 0xf110
@@ -277,13 +277,12 @@
/* TX4927 Timer 2 (32-bit registers) */
#define TX4927_TMR2_BASE 0xf200
-#define TX4927_TMR2_TMTCR2 0xf104
-#define TX4927_TMR2_TMTISR2 0xf208
+#define TX4927_TMR2_TMTCR2 0xf200
+#define TX4927_TMR2_TMTISR2 0xf204
#define TX4927_TMR2_TMCPRA2 0xf208
-#define TX4927_TMR2_TMCPRB2 0xf20c
#define TX4927_TMR2_TMITMR2 0xf210
#define TX4927_TMR2_TMCCDR2 0xf220
-#define TX4927_TMR2_TMPGMR2 0xf230
+#define TX4927_TMR2_TMWTMR2 0xf240
#define TX4927_TMR2_TMTRR2 0xf2f0
#define TX4927_TMR2_LIMIT 0xf2ff
diff --git a/include/asm-mips/tx4927/tx4927_pci.h b/include/asm-mips/tx4927/tx4927_pci.h
index 165f6b8b217f..66c064690f41 100644
--- a/include/asm-mips/tx4927/tx4927_pci.h
+++ b/include/asm-mips/tx4927/tx4927_pci.h
@@ -253,6 +253,16 @@ struct tx4927_pcic_reg {
#define TX4927_CCFG_PCIDIVMODE_5 0x00001000
#define TX4927_CCFG_PCIDIVMODE_6 0x00001800
+#define TX4937_CCFG_PCIDIVMODE_MASK 0x00001c00
+#define TX4937_CCFG_PCIDIVMODE_8 0x00000000
+#define TX4937_CCFG_PCIDIVMODE_4 0x00000400
+#define TX4937_CCFG_PCIDIVMODE_9 0x00000800
+#define TX4937_CCFG_PCIDIVMODE_4_5 0x00000c00
+#define TX4937_CCFG_PCIDIVMODE_10 0x00001000
+#define TX4937_CCFG_PCIDIVMODE_5 0x00001400
+#define TX4937_CCFG_PCIDIVMODE_11 0x00001800
+#define TX4937_CCFG_PCIDIVMODE_5_5 0x00001c00
+
/* PCFG : Pin Configuration */
#define TX4927_PCFG_PCICLKEN_ALL 0x003f0000
#define TX4927_PCFG_PCICLKEN(ch) (0x00010000<<(ch))
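
An illustrative decode of the TX4937 CCFG.PCIDIVMODE field added above, returning the PCI
clock divider times two so the half-step modes (4.5, 5.5) stay integral. The helper name
and the x2 encoding are assumptions for this sketch, not part of the header.

#include <assert.h>
#include <stdint.h>

#define TX4937_CCFG_PCIDIVMODE_MASK	0x00001c00	/* as added above */

static unsigned int tx4937_pcidivmode_x2(uint32_t ccfg)
{
	/* index 0..7 follows the PCIDIVMODE_* values above: 8, 4, 9, 4.5, 10, 5, 11, 5.5 */
	static const unsigned int div_x2[8] = { 16, 8, 18, 9, 20, 10, 22, 11 };

	return div_x2[(ccfg & TX4937_CCFG_PCIDIVMODE_MASK) >> 10];
}

int main(void)
{
	assert(tx4937_pcidivmode_x2(0x00000c00) == 9);	/* PCIDIVMODE_4_5 */
	assert(tx4937_pcidivmode_x2(0x00001800) == 22);	/* PCIDIVMODE_11  */
	return 0;
}
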
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h
index 41bb96bb2120..91d813a37823 100644
--- a/include/asm-mips/uaccess.h
+++ b/include/asm-mips/uaccess.h
@@ -202,49 +202,49 @@ struct __large_struct { unsigned long buf[100]; };
* Yuck. We need two variants, one for 64bit operation and one
* for 32 bit mode and old iron.
*/
-#ifdef __mips64
-#define __GET_USER_DW(ptr) __get_user_asm("ld", ptr)
-#else
-#define __GET_USER_DW(ptr) __get_user_asm_ll32(ptr)
+#ifdef CONFIG_32BIT
+#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
+#endif
+#ifdef CONFIG_64BIT
+#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
#endif
-#define __get_user_nocheck(x,ptr,size) \
-({ \
- __typeof(*(ptr)) __gu_val = (__typeof(*(ptr))) 0; \
- long __gu_err = 0; \
- \
+extern void __get_user_unknown(void);
+
+#define __get_user_common(val, size, ptr) \
+do { \
switch (size) { \
- case 1: __get_user_asm("lb", ptr); break; \
- case 2: __get_user_asm("lh", ptr); break; \
- case 4: __get_user_asm("lw", ptr); break; \
- case 8: __GET_USER_DW(ptr); break; \
+ case 1: __get_user_asm(val, "lb", ptr); break; \
+ case 2: __get_user_asm(val, "lh", ptr); break; \
+ case 4: __get_user_asm(val, "lw", ptr); break; \
+ case 8: __GET_USER_DW(val, ptr); break; \
default: __get_user_unknown(); break; \
} \
- (x) = (__typeof__(*(ptr))) __gu_val; \
+} while (0)
+
+#define __get_user_nocheck(x,ptr,size) \
+({ \
+ long __gu_err; \
+ \
+ __get_user_common((x), size, ptr); \
__gu_err; \
})
#define __get_user_check(x,ptr,size) \
({ \
- const __typeof__(*(ptr)) __user * __gu_addr = (ptr); \
- __typeof__(*(ptr)) __gu_val = 0; \
long __gu_err = -EFAULT; \
+ const void __user * __gu_ptr = (ptr); \
+ \
+ if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
+ __get_user_common((x), size, __gu_ptr); \
\
- if (likely(access_ok(VERIFY_READ, __gu_addr, size))) { \
- switch (size) { \
- case 1: __get_user_asm("lb", __gu_addr); break; \
- case 2: __get_user_asm("lh", __gu_addr); break; \
- case 4: __get_user_asm("lw", __gu_addr); break; \
- case 8: __GET_USER_DW(__gu_addr); break; \
- default: __get_user_unknown(); break; \
- } \
- } \
- (x) = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \
})
-#define __get_user_asm(insn, addr) \
+#define __get_user_asm(val, insn, addr) \
{ \
+ long __gu_tmp; \
+ \
__asm__ __volatile__( \
"1: " insn " %1, %3 \n" \
"2: \n" \
@@ -255,14 +255,16 @@ struct __large_struct { unsigned long buf[100]; };
" .section __ex_table,\"a\" \n" \
" "__UA_ADDR "\t1b, 3b \n" \
" .previous \n" \
- : "=r" (__gu_err), "=r" (__gu_val) \
+ : "=r" (__gu_err), "=r" (__gu_tmp) \
: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
+ \
+ (val) = (__typeof__(val)) __gu_tmp; \
}
/*
* Get a long long 64 using 32 bit registers.
*/
-#define __get_user_asm_ll32(addr) \
+#define __get_user_asm_ll32(val, addr) \
{ \
__asm__ __volatile__( \
"1: lw %1, (%3) \n" \
@@ -278,21 +280,20 @@ struct __large_struct { unsigned long buf[100]; };
" " __UA_ADDR " 1b, 4b \n" \
" " __UA_ADDR " 2b, 4b \n" \
" .previous \n" \
- : "=r" (__gu_err), "=&r" (__gu_val) \
+ : "=r" (__gu_err), "=&r" (val) \
: "0" (0), "r" (addr), "i" (-EFAULT)); \
}
-extern void __get_user_unknown(void);
-
/*
* Yuck. We need two variants, one for 64bit operation and one
* for 32 bit mode and old iron.
*/
-#ifdef __mips64
-#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
-#else
+#ifdef CONFIG_32BIT
#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
#endif
+#ifdef CONFIG_64BIT
+#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
+#endif
#define __put_user_nocheck(x,ptr,size) \
({ \