patch-2.1.101 linux/arch/sparc64/kernel/smp.c
- Lines: 79
- Date: Fri May 8 00:11:29 1998
- Orig file: v2.1.100/linux/arch/sparc64/kernel/smp.c
- Orig date: Thu Apr 23 20:21:32 1998
diff -u --recursive --new-file v2.1.100/linux/arch/sparc64/kernel/smp.c linux/arch/sparc64/kernel/smp.c
@@ -85,7 +85,6 @@
{
cpu_data[id].udelay_val = loops_per_sec;
cpu_data[id].irq_count = 0;
- cpu_data[id].last_tlbversion_seen = tlb_context_cache & CTX_VERSION_MASK;
cpu_data[id].pgcache_size = 0;
cpu_data[id].pgd_cache = NULL;
cpu_data[id].pmd_cache = NULL;
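With the per-cpu last_tlbversion_seen copy gone, staleness is judged directly from the version bits packed into mm->context itself, and get_new_mmu_context() now takes the mm alone (see the second hunk below). A minimal sketch of that packing, assuming illustrative CTX_* names and a 10-bit hardware context number; the allocator body is a guess at the shape, not this kernel's code:

    #define CTX_NR_MASK      0x3ffUL           /* low 10 bits: hw context */
    #define CTX_VERSION_MASK (~CTX_NR_MASK)    /* high bits: sw version tag */

    static unsigned long tlb_context_cache;    /* last context handed out */

    static unsigned long get_new_ctx_sketch(void)
    {
            unsigned long ctx = tlb_context_cache + 1;

            if ((ctx & CTX_NR_MASK) == 0) {
                    /* Wrapped: the carry bumped the version bits, which
                     * stales every context of the old version at once,
                     * so no per-cpu version bookkeeping is needed. */
                    ctx |= 1;   /* skip hw context 0 (assumed reserved) */
            }
            return tlb_context_cache = ctx;
    }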
@@ -368,16 +367,28 @@
__flush_tlb_all();
}
+/* We know that the window frames of the user have been flushed
+ * to the stack before we get here because all callers of us
+ * are flush_tlb_*() routines, and these run after flush_cache_*()
+ * which performs the flushw.
+ */
static void smp_cross_call_avoidance(struct mm_struct *mm)
{
+ u32 ctx;
+
spin_lock(&scheduler_lock);
- get_new_mmu_context(mm, &tlb_context_cache);
+ get_new_mmu_context(mm);
mm->cpu_vm_mask = (1UL << smp_processor_id());
- if(segment_eq(current->tss.current_ds,USER_DS)) {
- u32 ctx = mm->context & 0x1fff;
-
- current->tss.ctx = ctx;
- spitfire_set_secondary_context(ctx);
+ current->tss.ctx = ctx = mm->context & 0x3ff;
+ spitfire_set_secondary_context(ctx);
+ __asm__ __volatile__("flush %g6");
+ spitfire_flush_dtlb_secondary_context();
+ spitfire_flush_itlb_secondary_context();
+ __asm__ __volatile__("flush %g6");
+ if(!segment_eq(current->tss.current_ds,USER_DS)) {
+ /* Rarely happens. */
+ current->tss.ctx = 0;
+ spitfire_set_secondary_context(0);
__asm__ __volatile__("flush %g6");
}
spin_unlock(&scheduler_lock);
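Reading the new smp_cross_call_avoidance() together with its callers in the hunks below: an mm that is live only on the local cpu can be handed a fresh context instead of cross-calling every cpu. A consolidated paraphrase of that fast path, using only names that appear in this patch, with the cross-call fallback elided:

    void smp_flush_tlb_mm(struct mm_struct *mm)
    {
            u32 ctx = mm->context & 0x3ff;      /* 10-bit hw context */

            if (mm == current->mm && mm->count == 1 &&
                mm->cpu_vm_mask == (1UL << smp_processor_id()))
                    /* Sole user, sole cpu: no cross call needed. */
                    return smp_cross_call_avoidance(mm);

            /* ... otherwise cross-call the other cpus to flush ctx ... */
    }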
@@ -385,7 +396,7 @@
void smp_flush_tlb_mm(struct mm_struct *mm)
{
- u32 ctx = mm->context & 0x1fff;
+ u32 ctx = mm->context & 0x3ff;
if(mm == current->mm && mm->count == 1) {
if(mm->cpu_vm_mask == (1UL << smp_processor_id()))
@@ -401,7 +412,7 @@
void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
- u32 ctx = mm->context & 0x1fff;
+ u32 ctx = mm->context & 0x3ff;
if(mm == current->mm && mm->count == 1) {
if(mm->cpu_vm_mask == (1UL << smp_processor_id()))
@@ -416,7 +427,7 @@
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
{
- u32 ctx = mm->context & 0x1fff;
+ u32 ctx = mm->context & 0x3ff;
if(mm == current->mm && mm->count == 1) {
if(mm->cpu_vm_mask == (1UL << smp_processor_id()))
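All three hunks above narrow the mask applied to mm->context from 0x1fff (13 bits, 8192 values) to 0x3ff (10 bits, 1024 values); everything above bit 9 is now a software version tag that must never reach the MMU. A standalone demonstration of what each mask keeps from a packed context word:

    #include <stdio.h>

    int main(void)
    {
            unsigned long ctx = 0x12345678UL;   /* arbitrary packed value */

            printf("& 0x1fff -> %#lx\n", ctx & 0x1fff);  /* 0x1678 */
            printf("& 0x3ff  -> %#lx\n", ctx & 0x3ff);   /* 0x278 */
            return 0;
    }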
@@ -424,7 +435,7 @@
return smp_cross_call_avoidance(mm);
}
#if 0 /* XXX Disabled until further notice... */
- else if(mm != current->mm && mm->count == 1) {
+ else if(mm->count == 1) {
/* Try to handle two special cases to avoid cross calls
* in common scenarios where we are swapping process
* pages out.
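The condition change in this disabled block is a pure simplification: the else-if can only run when the leading mm == current->mm && mm->count == 1 test failed, so if mm->count == 1 holds there, mm != current->mm is already implied and retesting it added nothing. A standalone toy of that control flow, with illustrative names:

    #include <assert.h>

    static int classify(int mm_is_current, int count)
    {
            if (mm_is_current && count == 1)
                    return 1;               /* local fast path */
            else if (count == 1) {
                    assert(!mm_is_current); /* implied, needs no test */
                    return 2;               /* swap-out special case */
            }
            return 0;                       /* cross call */
    }

    int main(void)
    {
            assert(classify(1, 1) == 1);
            assert(classify(0, 1) == 2);
            assert(classify(0, 2) == 0);
            return 0;
    }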