author		Haavard Skinnemoen <hskinnemoen@atmel.com>	2006-09-25 23:32:13 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>		2006-09-26 08:48:54 -0700
commit		5f97f7f9400de47ae837170bb274e90ad3934386 (patch)
tree		514451e6dc6b46253293a00035d375e77b1c65ed /include/asm-avr32/cacheflush.h
parent		53e62d3aaa60590d4a69b4e07c29f448b5151047 (diff)
[PATCH] avr32 architecture
This adds support for the Atmel AVR32 architecture as well as the AT32AP7000
CPU and the AT32STK1000 development board.
AVR32 is a new high-performance 32-bit RISC microprocessor core, designed for
cost-sensitive embedded applications, with particular emphasis on low power
consumption and high code density. The AVR32 architecture is not binary
compatible with earlier 8-bit AVR architectures.
The AVR32 architecture, including the instruction set, is described by the
AVR32 Architecture Manual, available from
http://www.atmel.com/dyn/resources/prod_documents/doc32000.pdf
The Atmel AT32AP7000 is the first CPU implementing the AVR32 architecture. It
features a 7-stage pipeline, 16KB instruction and data caches and a full
Memory Management Unit. It also comes with a large set of integrated
peripherals, many of which are shared with the AT91 ARM-based controllers from
Atmel.
The full data sheet is available from
http://www.atmel.com/dyn/resources/prod_documents/doc32003.pdf
while the CPU core implementation including caches and MMU is documented by
the AVR32 AP Technical Reference, available from
http://www.atmel.com/dyn/resources/prod_documents/doc32001.pdf
Information about the AT32STK1000 development board can be found at
http://www.atmel.com/dyn/products/tools_card.asp?tool_id=3918
which also provides a BSP CD image with an earlier version of this patch,
development tools (binaries and source/patches) and a root filesystem image
suitable for booting from SD card.
Alternatively, there's a preliminary "getting started" guide available at
http://avr32linux.org/twiki/bin/view/Main/GettingStarted which provides links
to the sources and patches you will need in order to set up a cross-compiling
environment for avr32-linux.
This patch, as well as the other patches included with the BSP and the
toolchain patches, is actively supported by Atmel Corporation.
[dmccr@us.ibm.com: Fix more pxx_page macro locations]
[bunk@stusta.de: fix `make defconfig']
Signed-off-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Dave McCracken <dmccr@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-avr32/cacheflush.h')
-rw-r--r--	include/asm-avr32/cacheflush.h	129
1 file changed, 129 insertions, 0 deletions
diff --git a/include/asm-avr32/cacheflush.h b/include/asm-avr32/cacheflush.h
new file mode 100644
index 00000000000..f1bf1708980
--- /dev/null
+++ b/include/asm-avr32/cacheflush.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_CACHEFLUSH_H
+#define __ASM_AVR32_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+
+#define CACHE_OP_ICACHE_INVALIDATE	0x01
+#define CACHE_OP_DCACHE_INVALIDATE	0x0b
+#define CACHE_OP_DCACHE_CLEAN		0x0c
+#define CACHE_OP_DCACHE_CLEAN_INVAL	0x0d
+
+/*
+ * Invalidate any cacheline containing virtual address vaddr without
+ * writing anything back to memory.
+ *
+ * Note that this function may corrupt unrelated data structures when
+ * applied on buffers that are not cacheline aligned in both ends.
+ */
+static inline void invalidate_dcache_line(void *vaddr)
+{
+	asm volatile("cache %0[0], %1"
+		     :
+		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_INVALIDATE)
+		     : "memory");
+}
+
+/*
+ * Make sure any cacheline containing virtual address vaddr is written
+ * to memory.
+ */
+static inline void clean_dcache_line(void *vaddr)
+{
+	asm volatile("cache %0[0], %1"
+		     :
+		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN)
+		     : "memory");
+}
+
+/*
+ * Make sure any cacheline containing virtual address vaddr is written
+ * to memory and then invalidate it.
+ */
+static inline void flush_dcache_line(void *vaddr)
+{
+	asm volatile("cache %0[0], %1"
+		     :
+		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN_INVAL)
+		     : "memory");
+}
+
+/*
+ * Invalidate any instruction cacheline containing virtual address
+ * vaddr.
+ */
+static inline void invalidate_icache_line(void *vaddr)
+{
+	asm volatile("cache %0[0], %1"
+		     :
+		     : "r"(vaddr), "n"(CACHE_OP_ICACHE_INVALIDATE)
+		     : "memory");
+}
+
+/*
+ * Applies the above functions on all lines that are touched by the
+ * specified virtual address range.
+ */
+void invalidate_dcache_region(void *start, size_t len);
+void clean_dcache_region(void *start, size_t len);
+void flush_dcache_region(void *start, size_t len);
+void invalidate_icache_region(void *start, size_t len);
+
+/*
+ * Make sure any pending writes are completed before continuing.
+ */
+#define flush_write_buffer() asm volatile("sync 0" : : : "memory")
+
+/*
+ * The following functions are called when a virtual mapping changes.
+ * We do not need to flush anything in this case.
+ */
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vunmap(start, end)		do { } while (0)
+
+/*
+ * I think we need to implement this one to be able to reliably
+ * execute pages from RAMDISK. However, if we implement the
+ * flush_dcache_*() functions, it might not be needed anymore.
+ *
+ * #define flush_icache_page(vma, page)		do { } while (0)
+ */
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+
+/*
+ * These are (I think) related to D-cache aliasing. We might need to
+ * do something here, but only for certain configurations. No such
+ * configurations exist at this time.
+ */
+#define flush_dcache_page(page)			do { } while (0)
+#define flush_dcache_mmap_lock(page)		do { } while (0)
+#define flush_dcache_mmap_unlock(page)		do { } while (0)
+
+/*
+ * These are for I/D cache coherency. In this case, we do need to
+ * flush with all configurations.
+ */
+extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void flush_icache_user_range(struct vm_area_struct *vma,
+				    struct page *page,
+				    unsigned long addr, int len);
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) do {	\
+	memcpy(dst, src, len);						\
+	flush_icache_user_range(vma, page, vaddr, len);			\
+} while(0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
+	memcpy(dst, src, len)
+
+#endif /* __ASM_AVR32_CACHEFLUSH_H */
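
For illustration only, and not part of the patch above: a minimal sketch of how driver code might use the region helpers declared in this header around a DMA transfer on a non-coherent bus, and how flush_icache_range() keeps the instruction cache coherent after code has been written to memory. The example_* functions and the device steps mentioned in the comments are hypothetical placeholders; it is assumed that <asm/cacheflush.h> resolves to this header in an avr32 kernel build, and real drivers would normally use the generic DMA-mapping API rather than calling these helpers directly.

/*
 * Hypothetical usage sketch (editor's illustration).  Only the helpers
 * declared in <asm/cacheflush.h> above are real; the example_* functions
 * and the "device" steps in the comments are placeholders.
 */
#include <linux/types.h>
#include <asm/cacheflush.h>

static void example_buffer_to_device(void *buf, size_t len)
{
	/* Write dirty D-cache lines back so the device sees current data. */
	clean_dcache_region(buf, len);
	/* Drain pending writes before telling the device to start. */
	flush_write_buffer();
	/* ... start the (hypothetical) device read of buf here ... */
}

static void example_buffer_from_device(void *buf, size_t len)
{
	/*
	 * Drop stale D-cache lines before the CPU reads data the device
	 * wrote.  Per the header comment, buf and len should be cacheline
	 * aligned, or unrelated data sharing a line may be corrupted.
	 */
	invalidate_dcache_region(buf, len);
	/* ... CPU may now read buf ... */
}

static void example_run_copied_code(void *code, size_t len)
{
	unsigned long start = (unsigned long)code;

	/* Make the newly written instructions visible to the I-cache. */
	flush_icache_range(start, start + len);
}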