From 98c5584cfc47932c4f3ccf5eee2e0bae1447b85e Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 31 Jan 2006 18:31:20 -0800
Subject: [SPARC64]: Add infrastructure for dynamic TSB sizing.

This also cleans up tsb_context_switch().  The assembler routine is
now __tsb_context_switch() and the former is an inline function that
picks the relevant bits out of the mm_struct and passes them into the
assembler code as arguments.

setup_tsb_params() computes the locked TLB entry used to map the TSB.
Later, when we support the physical address quad load instructions of
Cheetah+ and later chips, we'll simply use the physical address as the
TSB register value and set the map virtual address and PTE both to
zero.

Signed-off-by: David S. Miller
---
 include/asm-sparc64/mmu.h | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 36384cf7faa..2effeba2476 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -90,9 +90,20 @@
 
 #ifndef __ASSEMBLY__
 
+#define TSB_ENTRY_ALIGNMENT	16
+
+struct tsb {
+	unsigned long tag;
+	unsigned long pte;
+} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
+
 typedef struct {
 	unsigned long	sparc64_ctx_val;
-	unsigned long	*sparc64_tsb;
+	struct tsb	*tsb;
+	unsigned long	tsb_nentries;
+	unsigned long	tsb_reg_val;
+	unsigned long	tsb_map_vaddr;
+	unsigned long	tsb_map_pte;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
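
For illustration, here is a minimal sketch of the tsb_context_switch()
split described above.  This is a hedged reconstruction, not the
commit's actual code: the prototype of __tsb_context_switch() is not
part of this diff, so the argument list below is an assumption based
on the new mm_context_t fields, and the stand-in mm_struct is reduced
to the one member the sketch needs.

	/* Assumed assembler entry point; the real signature may take
	 * additional arguments (e.g. the page table base). */
	extern void __tsb_context_switch(unsigned long tsb_reg_val,
					 unsigned long tsb_map_vaddr,
					 unsigned long tsb_map_pte);

	/* Stand-in for the kernel's mm_struct, carrying only the
	 * context member used here; mm_context_t is the type from the
	 * diff above. */
	struct mm_struct {
		mm_context_t context;
	};

	/* The former assembler routine becomes an inline that picks the
	 * TSB bits out of the mm and hands them to the assembler code. */
	static inline void tsb_context_switch(struct mm_struct *mm)
	{
		__tsb_context_switch(mm->context.tsb_reg_val,
				     mm->context.tsb_map_vaddr,
				     mm->context.tsb_map_pte);
	}

Note that this calling convention also covers the Cheetah+ plan from
the commit message: once quad loads by physical address are supported,
tsb_reg_val simply carries the TSB's physical address while
tsb_map_vaddr and tsb_map_pte are passed as zero, with no change to
the wrapper needed.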