patch-2.1.44 linux/include/asm-mips/system.h
Next file: linux/include/asm-mips/termbits.h
Previous file: linux/include/asm-mips/sysmips.h
Back to the patch index
Back to the overall index
- Lines: 273
- Date: Thu Jun 26 12:33:40 1997
- Orig file: v2.1.43/linux/include/asm-mips/system.h
- Orig date: Wed Dec 13 02:39:47 1995
diff -u --recursive --new-file v2.1.43/linux/include/asm-mips/system.h linux/include/asm-mips/system.h
@@ -6,33 +6,73 @@
* for more details.
*
* Copyright (C) 1994, 1995 by Ralf Baechle
+ * Modified further for R[236]000 by Paul M. Antoine, 1996
*/
#ifndef __ASM_MIPS_SYSTEM_H
#define __ASM_MIPS_SYSTEM_H
+#include <asm/sgidefs.h>
#include <linux/kernel.h>
-#if defined (__R4000__)
-#define sti() \
+extern __inline__ void
+__sti(void)
+{
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n\t"
+ "mfc0\t$1,$12\n\t"
+ "ori\t$1,0x1f\n\t"
+ "xori\t$1,0x1e\n\t"
+ "mtc0\t$1,$12\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : /* no outputs */
+ : /* no inputs */
+ : "$1", "memory");
+}
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+extern __inline__ void
+__cli(void)
+{
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n\t"
+ "mfc0\t$1,$12\n\t"
+ "ori\t$1,1\n\t"
+ "xori\t$1,1\n\t"
+ "mtc0\t$1,$12\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : /* no outputs */
+ : /* no inputs */
+ : "$1", "memory");
+}
+
+#define __save_flags(x) \
__asm__ __volatile__( \
".set\tnoreorder\n\t" \
- ".set\tnoat\n\t" \
- "mfc0\t$1,$12\n\t" \
- "ori\t$1,0x1f\n\t" \
- "xori\t$1,0x1e\n\t" \
- "mtc0\t$1,$12\n\t" \
- ".set\tat\n\t" \
+ "mfc0\t%0,$12\n\t" \
".set\treorder" \
- : /* no outputs */ \
+ : "=r" (x) \
: /* no inputs */ \
- : "$1")
+ : "memory")
-#define cli() \
+#define __save_and_cli(x) \
__asm__ __volatile__( \
".set\tnoreorder\n\t" \
".set\tnoat\n\t" \
- "mfc0\t$1,$12\n\t" \
- "ori\t$1,1\n\t" \
+ "mfc0\t%0,$12\n\t" \
+ "ori\t$1,%0,1\n\t" \
"xori\t$1,1\n\t" \
"mtc0\t$1,$12\n\t" \
"nop\n\t" \
@@ -40,78 +80,71 @@
"nop\n\t" \
".set\tat\n\t" \
".set\treorder" \
- : /* no outputs */ \
+ : "=r" (x) \
: /* no inputs */ \
- : "$1")
+ : "$1", "memory")
+
+extern void __inline__
+__restore_flags(int flags)
+{
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ "mtc0\t%0,$12\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".set\treorder"
+ : /* no output */
+ : "r" (flags)
+ : "memory");
+}
-#else /* !defined (__R4000__) */
/*
- * Untested goodies for the R3000 based DECstation et al.
+ * Non-SMP versions ...
*/
-#define sti() \
-__asm__ __volatile__( \
- ".set\tnoreorder\n\t" \
- ".set\tnoat\n\t" \
- "mfc0\t$1,$12\n\t" \
- "ori\t$1,0x01\n\t" \
- "mtc0\t$1,$12\n\t" \
- ".set\tat\n\t" \
- ".set\treorder" \
- : /* no outputs */ \
- : /* no inputs */ \
- : "$1")
-
-#define cli() \
-__asm__ __volatile__( \
- ".set\tnoreorder\n\t" \
- ".set\tnoat\n\t" \
- "mfc0\t$1,$12\n\t" \
- "ori\t$1,1\n\t" \
- "xori\t$1,1\n\t" \
- "mtc0\t$1,$12\n\t" \
- ".set\tat\n\t" \
- ".set\treorder" \
- : /* no outputs */ \
- : /* no inputs */ \
- : "$1")
-#endif /* !defined (__R4000__) */
+#define sti() __sti()
+#define cli() __cli()
+#define save_flags(x) __save_flags(x)
+#define save_and_cli(x) __save_and_cli(x)
+#define restore_flags(x) __restore_flags(x)
-#define nop() __asm__ __volatile__ ("nop")
-
-#define save_flags(x) \
+#define sync_mem() \
__asm__ __volatile__( \
".set\tnoreorder\n\t" \
- "mfc0\t%0,$12\n\t" \
+ "sync\n\t" \
".set\treorder" \
- : "=r" (x)) \
+ : /* no output */ \
+ : /* no input */ \
+ : "memory")
-#define restore_flags(x) \
-__asm__ __volatile__( \
- ".set\tnoreorder\n\t" \
- "mtc0\t%0,$12\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- ".set\treorder" \
- : /* no output */ \
- : "r" (x)) \
+#if !defined (__LANGUAGE_ASSEMBLY__)
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ */
+extern asmlinkage void (*resume)(void *tsk);
+#endif /* !defined (__LANGUAGE_ASSEMBLY__) */
-#define sync_mem() \
-__asm__ __volatile__( \
- ".set\tnoreorder\n\t" \
- "sync\n\t" \
- ".set\treorder") \
+/*
+ * FIXME: resume() assumes current == prev
+ */
+#define switch_to(prev,next) \
+do { \
+ prev->tss.current_ds = active_ds; \
+ active_ds = next->tss.current_ds; \
+ resume(next); \
+} while(0)
/*
* The 8 and 16 bit variants have to disable interrupts temporarily.
* Both are currently unused.
*/
-extern inline unsigned long xchg_u8(volatile char * m, unsigned long val)
+extern __inline__ unsigned long xchg_u8(volatile char * m, unsigned long val)
{
unsigned long flags, retval;
save_flags(flags);
- sti();
+ cli();
retval = *m;
*m = val;
restore_flags(flags);
@@ -119,12 +152,12 @@
return retval;
}
-extern inline unsigned long xchg_u16(volatile short * m, unsigned long val)
+extern __inline__ unsigned long xchg_u16(volatile short * m, unsigned long val)
{
unsigned long flags, retval;
save_flags(flags);
- sti();
+ cli();
retval = *m;
*m = val;
restore_flags(flags);
@@ -136,8 +169,10 @@
* For 32 and 64 bit operands we can take advantage of ll and sc.
* FIXME: This doesn't work for R3000 machines.
*/
-extern inline unsigned long xchg_u32(volatile int * m, unsigned long val)
+extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
+#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) || \
+ (_MIPS_ISA == _MIPS_ISA_MIPS4) || (_MIPS_ISA == _MIPS_ISA_MIPS5)
unsigned long dummy;
__asm__ __volatile__(
@@ -152,14 +187,23 @@
".set\treorder"
: "=r" (val), "=r" (m), "=r" (dummy)
: "1" (m), "2" (val));
+#else /* FIXME: Brain-dead approach, but then again, I AM hacking - PMA */
+ unsigned long flags, retval;
+
+ save_flags(flags);
+ cli();
+ retval = *m;
+ *m = val;
+ restore_flags(flags);
+#endif /* Processor-dependent optimization */
return val;
}
/*
* Only used for 64 bit kernel.
*/
-extern inline unsigned long xchg_u64(volatile long * m, unsigned long val)
+extern __inline__ unsigned long xchg_u64(volatile long * m, unsigned long val)
{
unsigned long dummy;
@@ -192,7 +236,7 @@
*/
extern void __xchg_called_with_bad_pointer(void);
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 1:
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov