patch-2.1.34 linux/include/asm-sparc/atomic.h
- Lines: 120
- Date: Mon Apr 14 09:31:09 1997
- Orig file: v2.1.33/linux/include/asm-sparc/atomic.h
- Orig date: Sun Jan 26 02:07:47 1997
diff -u --recursive --new-file v2.1.33/linux/include/asm-sparc/atomic.h linux/include/asm-sparc/atomic.h
@@ -6,7 +6,14 @@
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__
-typedef int atomic_t;
+#ifdef __SMP__
+/* This is a temporary measure. -DaveM */
+typedef struct { volatile int counter; } atomic_t;
+#else
+typedef struct { int counter; } atomic_t;
+#endif
+
+#define ATOMIC_INIT { 0 }
#ifdef __KERNEL__
#include <asm/system.h>
@@ -15,19 +22,38 @@
/* We do the bulk of the actual work out of line in two common
* routines in assembler, see arch/sparc/lib/atomic.S for the
* "fun" details.
+ *
+ * For SMP the trick is you embed the spin lock byte within
+ * the word, use the low byte so signedness is easily retained
+ * via a quick arithmetic shift. It looks like this:
+ *
+ *  ----------------------------------------
+ *  | signed 24-bit counter value |  lock  |  atomic_t
+ *  ----------------------------------------
+ *   31                          8 7      0
*/
+static __inline__ int atomic_read(atomic_t *v)
+{
+ int val;
+
+ __asm__ __volatile__("sra %1, 0x8, %0"
+ : "=r" (val)
+ : "r" (v->counter));
+ return val;
+}
+#define atomic_set(v, i) (((v)->counter) = ((i) << 8))
+
/* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
#define __atomic_fool_gcc(x) ((struct { int a[100]; } *)x)
-static __inline__ void atomic_add(atomic_t i, atomic_t *v)
+static __inline__ void atomic_add(int i, atomic_t *v)
{
register atomic_t *ptr asm("g1");
- register atomic_t increment asm("g2");
-
+ register int increment asm("g2");
ptr = (atomic_t *) __atomic_fool_gcc(v);
increment = i;
@@ -37,13 +63,13 @@
add %%o7, 8, %%o7
" : "=&r" (increment)
: "0" (increment), "r" (ptr)
- : "g3", "g4", "g7", "memory");
+ : "g3", "g4", "g7", "memory", "cc");
}
-static __inline__ void atomic_sub(atomic_t i, atomic_t *v)
+static __inline__ void atomic_sub(int i, atomic_t *v)
{
register atomic_t *ptr asm("g1");
- register atomic_t increment asm("g2");
+ register int increment asm("g2");
ptr = (atomic_t *) __atomic_fool_gcc(v);
increment = i;
@@ -54,13 +80,13 @@
add %%o7, 8, %%o7
" : "=&r" (increment)
: "0" (increment), "r" (ptr)
- : "g3", "g4", "g7", "memory");
+ : "g3", "g4", "g7", "memory", "cc");
}
-static __inline__ int atomic_add_return(atomic_t i, atomic_t *v)
+static __inline__ int atomic_add_return(int i, atomic_t *v)
{
register atomic_t *ptr asm("g1");
- register atomic_t increment asm("g2");
+ register int increment asm("g2");
ptr = (atomic_t *) __atomic_fool_gcc(v);
increment = i;
@@ -71,15 +97,15 @@
add %%o7, 8, %%o7
" : "=&r" (increment)
: "0" (increment), "r" (ptr)
- : "g3", "g4", "g7", "memory");
+ : "g3", "g4", "g7", "memory", "cc");
return increment;
}
-static __inline__ int atomic_sub_return(atomic_t i, atomic_t *v)
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
register atomic_t *ptr asm("g1");
- register atomic_t increment asm("g2");
+ register int increment asm("g2");
ptr = (atomic_t *) __atomic_fool_gcc(v);
increment = i;
@@ -90,7 +116,7 @@
add %%o7, 8, %%o7
" : "=&r" (increment)
: "0" (increment), "r" (ptr)
- : "g3", "g4", "g7", "memory");
+ : "g3", "g4", "g7", "memory", "cc");
return increment;
}
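
For reference, here is a minimal, stand-alone C sketch of the counter layout the new comment describes: the signed value lives in the upper 24 bits and the low byte is left free for the spin-lock flag, so an arithmetic right shift by 8 recovers the value and a left shift by 8 stores one. The demo_* names and the main() driver are invented for this illustration only; the real primitives do the update out of line in arch/sparc/lib/atomic.S while holding the lock byte, which this sketch omits entirely.

#include <stdio.h>

/* Mirrors the patched atomic_t layout: 24-bit signed counter in the
 * upper bits, low byte reserved for the spin-lock flag.  The demo_*
 * names are made up for this example. */
typedef struct { int counter; } demo_atomic_t;

#define DEMO_ATOMIC_INIT { 0 }

/* atomic_read in the patch is a single 'sra %reg, 8': an arithmetic
 * right shift that discards the lock byte and keeps the sign.  '>> 8'
 * on a signed int behaves the same way on gcc (implementation-defined
 * in ISO C, but arithmetic on the targets the kernel runs on). */
static int demo_atomic_read(const demo_atomic_t *v)
{
	return v->counter >> 8;
}

/* atomic_set stores the value shifted left past the lock byte.
 * The unsigned cast sidesteps left-shifting a negative value and
 * relies on two's complement, as the kernel code does. */
static void demo_atomic_set(demo_atomic_t *v, int i)
{
	v->counter = (int)((unsigned int)i << 8);
}

/* The out-of-line SPARC routines effectively add 'i << 8' to the word
 * while the lock byte is held; here we do it with no locking at all. */
static int demo_atomic_add_return(demo_atomic_t *v, int i)
{
	v->counter += (int)((unsigned int)i << 8);
	return v->counter >> 8;
}

int main(void)
{
	demo_atomic_t v = DEMO_ATOMIC_INIT;

	demo_atomic_set(&v, -5);
	printf("after set(-5):  %d\n", demo_atomic_read(&v));            /* -5 */
	printf("add_return(12): %d\n", demo_atomic_add_return(&v, 12));  /*  7 */
	return 0;
}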