patch-2.4.9 linux/include/asm-sparc64/mmu_context.h
- Lines: 40
- Date: Tue Aug 14 19:57:29 2001
- Orig file: v2.4.8/linux/include/asm-sparc64/mmu_context.h
- Orig date: Sun Aug 12 13:28:01 2001
diff -u --recursive --new-file v2.4.8/linux/include/asm-sparc64/mmu_context.h linux/include/asm-sparc64/mmu_context.h
@@ -1,4 +1,4 @@
-/* $Id: mmu_context.h,v 1.48 2001/08/03 06:18:52 davem Exp $ */
+/* $Id: mmu_context.h,v 1.50 2001/08/13 20:24:34 kanoj Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H
@@ -9,6 +9,7 @@
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/spitfire.h>
+#include <asm/page.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
@@ -18,12 +19,23 @@
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
-#define CTX_VERSION_SHIFT (PAGE_SHIFT - 3)
+/*
+ * For the 8K page size kernel, use only 10 hw context bits to optimize some
+ * shifts in the fast TLB miss handlers (specifically the vpte offset
+ * calculation), instead of all 13 bits. For other page sizes, this handler
+ * optimization cannot be done; even then, all 13 bits cannot be used, because
+ * the TLB handlers use the "andcc" instruction, which sign-extends 13-bit arguments.
+ */
+#if PAGE_SHIFT == 13
+#define CTX_VERSION_SHIFT 10
+#else
+#define CTX_VERSION_SHIFT 12
+#endif
+
#define CTX_VERSION_MASK ((~0UL) << CTX_VERSION_SHIFT)
#define CTX_FIRST_VERSION ((1UL << CTX_VERSION_SHIFT) + 1UL)
#define CTX_VALID(__ctx) \
(!(((__ctx) ^ tlb_context_cache) & CTX_VERSION_MASK))
-#define CTX_NEVER_WAS_VALID(__ctx) ((__ctx) == 0UL)
#define CTX_HWBITS(__ctx) ((__ctx) & ~CTX_VERSION_MASK)
extern void get_new_mmu_context(struct mm_struct *mm);
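For readers following the macro changes above: the sketch below (not part of the patch) shows how a context word splits into a version (generation) field and a hardware context field under the 8K-page setting, where CTX_VERSION_SHIFT is 10. The macros mirror the patched header; CTX_VALID is adapted to take the context cache as a parameter instead of referencing the kernel's global tlb_context_cache, and the variable names and main() driver are illustrative only.

/* Illustrative only: mirrors the macros above for the PAGE_SHIFT == 13 case. */
#include <stdio.h>

#define CTX_VERSION_SHIFT 10
#define CTX_VERSION_MASK  ((~0UL) << CTX_VERSION_SHIFT)
#define CTX_FIRST_VERSION ((1UL << CTX_VERSION_SHIFT) + 1UL)
#define CTX_HWBITS(ctx)   ((ctx) & ~CTX_VERSION_MASK)
/* Adapted: the kernel macro compares against the global tlb_context_cache. */
#define CTX_VALID(ctx, cache) (!(((ctx) ^ (cache)) & CTX_VERSION_MASK))

int main(void)
{
	unsigned long cache = CTX_FIRST_VERSION;              /* version 1, hw context 1 */
	unsigned long ctx   = (2UL << CTX_VERSION_SHIFT) | 5; /* version 2, hw context 5 */

	printf("hw context:  %lu\n", CTX_HWBITS(ctx));       /* prints 5 */
	printf("still valid: %d\n", CTX_VALID(ctx, cache));  /* prints 0: version mismatch */
	return 0;
}

When the 10-bit hardware context numbers wrap, the kernel bumps the version field in tlb_context_cache; every mm whose stored context carries an older version then fails CTX_VALID and is forced to allocate a fresh context, invalidating all stale contexts at once.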