patch-2.4.19 linux-2.4.19/include/asm-s390/bitops.h
- Lines: 825
- Date: Fri Aug 2 17:39:45 2002
- Orig file: linux-2.4.18/include/asm-s390/bitops.h
- Orig date: Wed Jul 25 14:12:02 2001
diff -urN linux-2.4.18/include/asm-s390/bitops.h linux-2.4.19/include/asm-s390/bitops.h
@@ -54,28 +54,29 @@
*/
static __inline__ void set_bit_cs(int nr, volatile void * addr)
{
+ unsigned long bits, mask;
__asm__ __volatile__(
#if ALIGN_CS == 1
- " lhi 1,3\n" /* CS must be aligned on 4 byte b. */
- " nr 1,%1\n" /* isolate last 2 bits of address */
- " xr %1,1\n" /* make addr % 4 == 0 */
- " sll 1,3\n"
- " ar %0,1\n" /* add alignement to bitnr */
+ " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
+ " nr %2,%1\n" /* isolate last 2 bits of address */
+ " xr %1,%2\n" /* make addr % 4 == 0 */
+ " sll %2,3\n"
+ " ar %0,%2\n" /* add alignement to bitnr */
#endif
- " lhi 1,31\n"
- " nr 1,%0\n" /* make shift value */
- " xr %0,1\n"
+ " lhi %2,31\n"
+ " nr %2,%0\n" /* make shift value */
+ " xr %0,%2\n"
" srl %0,3\n"
- " lhi 2,1\n"
+ " lhi %3,1\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
- " sll 2,0(1)\n" /* make OR mask */
+ " sll %3,0(%2)\n" /* make OR mask */
" l %0,0(%1)\n"
- "0: lr 1,%0\n" /* CS loop starts here */
- " or 1,2\n" /* set bit */
- " cs %0,1,0(%1)\n"
+ "0: lr %2,%0\n" /* CS loop starts here */
+ " or %2,%3\n" /* set bit */
+ " cs %0,%2,0(%1)\n"
" jl 0b"
- : "+a" (nr), "+a" (addr) :
- : "cc", "memory", "1", "2" );
+ : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
+ : "cc", "memory" );
}
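
The rewritten set_bit_cs() is a classic compare-and-swap loop: load the word, OR in the single-bit mask, and retry CS until no other CPU has changed the word in between. The substance of the patch is replacing the hardcoded registers 1 and 2 with compiler-allocated operands %2 and %3 ("=&a" (bits), "=&d" (mask)), so the registers no longer need to appear in the clobber list. A minimal C sketch of the same loop, assuming addr is already 4-byte aligned (the ALIGN_CS preamble folds any misalignment into nr) and using GCC's __sync_val_compare_and_swap builtin, which postdates this kernel:

static inline void set_bit_cs_sketch(int nr, volatile void *addr)
{
        unsigned long *p = (unsigned long *) addr + (nr >> 5);
        unsigned long mask = 1UL << (nr & 31);  /* "sll %3,0(%2)" */
        unsigned long old;

        do {
                old = *p;               /* "l %0,0(%1)" / "lr %2,%0" */
        } while (__sync_val_compare_and_swap(p, old, old | mask) != old);
}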
/*
@@ -83,30 +84,31 @@
*/
static __inline__ void clear_bit_cs(int nr, volatile void * addr)
{
- static const int mask = -1;
+ static const int minusone = -1;
+ unsigned long bits, mask;
__asm__ __volatile__(
#if ALIGN_CS == 1
- " lhi 1,3\n" /* CS must be aligned on 4 byte b. */
- " nr 1,%1\n" /* isolate last 2 bits of address */
- " xr %1,1\n" /* make addr % 4 == 0 */
- " sll 1,3\n"
- " ar %0,1\n" /* add alignement to bitnr */
+ " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
+ " nr %2,%1\n" /* isolate last 2 bits of address */
+ " xr %1,%2\n" /* make addr % 4 == 0 */
+ " sll %2,3\n"
+ " ar %0,%2\n" /* add alignement to bitnr */
#endif
- " lhi 1,31\n"
- " nr 1,%0\n" /* make shift value */
- " xr %0,1\n"
+ " lhi %2,31\n"
+ " nr %2,%0\n" /* make shift value */
+ " xr %0,%2\n"
" srl %0,3\n"
- " lhi 2,1\n"
+ " lhi %3,1\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
- " sll 2,0(1)\n"
- " x 2,%2\n" /* make AND mask */
+ " sll %3,0(%2)\n"
+ " x %3,%4\n" /* make AND mask */
" l %0,0(%1)\n"
- "0: lr 1,%0\n" /* CS loop starts here */
- " nr 1,2\n" /* clear bit */
- " cs %0,1,0(%1)\n"
+ "0: lr %2,%0\n" /* CS loop starts here */
+ " nr %2,%3\n" /* clear bit */
+ " cs %0,%2,0(%1)\n"
" jl 0b"
- : "+a" (nr), "+a" (addr) : "m" (mask)
- : "cc", "memory", "1", "2" );
+ : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask)
+ : "m" (minusone) : "cc", "memory" );
}
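
clear_bit_cs() runs the same CS loop with an AND mask. The complement is built by XOR-ing the shifted one-bit with the in-memory constant minusone (renamed from mask, which now names an output operand): (1 << bit) ^ -1 == ~(1 << bit). A sketch under the same assumptions as above:

static inline void clear_bit_cs_sketch(int nr, volatile void *addr)
{
        unsigned long *p = (unsigned long *) addr + (nr >> 5);
        unsigned long mask = (1UL << (nr & 31)) ^ ~0UL;  /* "x %3,%4" */
        unsigned long old;

        do {
                old = *p;
        } while (__sync_val_compare_and_swap(p, old, old & mask) != old);
}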
/*
@@ -114,28 +116,29 @@
*/
static __inline__ void change_bit_cs(int nr, volatile void * addr)
{
+ unsigned long bits, mask;
__asm__ __volatile__(
#if ALIGN_CS == 1
- " lhi 1,3\n" /* CS must be aligned on 4 byte b. */
- " nr 1,%1\n" /* isolate last 2 bits of address */
- " xr %1,1\n" /* make addr % 4 == 0 */
- " sll 1,3\n"
- " ar %0,1\n" /* add alignement to bitnr */
+ " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
+ " nr %2,%1\n" /* isolate last 2 bits of address */
+ " xr %1,%2\n" /* make addr % 4 == 0 */
+ " sll %2,3\n"
+ " ar %0,%2\n" /* add alignement to bitnr */
#endif
- " lhi 1,31\n"
- " nr 1,%0\n" /* make shift value */
- " xr %0,1\n"
+ " lhi %2,31\n"
+ " nr %2,%0\n" /* make shift value */
+ " xr %0,%2\n"
" srl %0,3\n"
- " lhi 2,1\n"
+ " lhi %3,1\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
- " sll 2,0(1)\n" /* make XR mask */
+ " sll %3,0(%2)\n" /* make XR mask */
" l %0,0(%1)\n"
- "0: lr 1,%0\n" /* CS loop starts here */
- " xr 1,2\n" /* change bit */
- " cs %0,1,0(%1)\n"
+ "0: lr %2,%0\n" /* CS loop starts here */
+ " xr %2,%3\n" /* change bit */
+ " cs %0,%2,0(%1)\n"
" jl 0b"
- : "+a" (nr), "+a" (addr) :
- : "cc", "memory", "1", "2" );
+ : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
+ : "cc", "memory" );
}
/*
@@ -143,30 +146,31 @@
*/
static __inline__ int test_and_set_bit_cs(int nr, volatile void * addr)
{
+ unsigned long bits, mask;
__asm__ __volatile__(
#if ALIGN_CS == 1
- " lhi 1,3\n" /* CS must be aligned on 4 byte b. */
- " nr 1,%1\n" /* isolate last 2 bits of address */
- " xr %1,1\n" /* make addr % 4 == 0 */
- " sll 1,3\n"
- " ar %0,1\n" /* add alignement to bitnr */
+ " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
+ " nr %2,%1\n" /* isolate last 2 bits of address */
+ " xr %1,%2\n" /* make addr % 4 == 0 */
+ " sll %2,3\n"
+ " ar %0,%2\n" /* add alignement to bitnr */
#endif
- " lhi 1,31\n"
- " nr 1,%0\n" /* make shift value */
- " xr %0,1\n"
+ " lhi %2,31\n"
+ " nr %2,%0\n" /* make shift value */
+ " xr %0,%2\n"
" srl %0,3\n"
+ " lhi %3,1\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
- " lhi 2,1\n"
- " sll 2,0(1)\n" /* make OR mask */
+ " sll %3,0(%2)\n" /* make OR mask */
" l %0,0(%1)\n"
- "0: lr 1,%0\n" /* CS loop starts here */
- " or 1,2\n" /* set bit */
- " cs %0,1,0(%1)\n"
+ "0: lr %2,%0\n" /* CS loop starts here */
+ " or %2,%3\n" /* set bit */
+ " cs %0,%2,0(%1)\n"
" jl 0b\n"
- " nr %0,2\n" /* isolate old bit */
- : "+a" (nr), "+a" (addr) :
- : "cc", "memory", "1", "2" );
- return nr;
+ " nr %0,%3\n" /* isolate old bit */
+ : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
+ : "cc", "memory" );
+ return nr != 0;
}
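
test_and_set_bit_cs() isolates the old bit after the loop ("nr %0,%3"), and the patch adds the != 0 normalization so callers get a clean 0 or 1 instead of the raw masked word. Sketch, same assumptions:

static inline int test_and_set_bit_cs_sketch(int nr, volatile void *addr)
{
        unsigned long *p = (unsigned long *) addr + (nr >> 5);
        unsigned long mask = 1UL << (nr & 31);
        unsigned long old;

        do {
                old = *p;
        } while (__sync_val_compare_and_swap(p, old, old | mask) != old);
        return (old & mask) != 0;       /* "nr %0,%3" plus "!= 0" */
}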
/*
@@ -174,32 +178,33 @@
*/
static __inline__ int test_and_clear_bit_cs(int nr, volatile void * addr)
{
- static const int mask = -1;
+ static const int minusone = -1;
+ unsigned long bits, mask;
__asm__ __volatile__(
#if ALIGN_CS == 1
- " lhi 1,3\n" /* CS must be aligned on 4 byte b. */
- " nr 1,%1\n" /* isolate last 2 bits of address */
- " xr %1,1\n" /* make addr % 4 == 0 */
- " sll 1,3\n"
- " ar %0,1\n" /* add alignement to bitnr */
+ " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
+ " nr %2,%1\n" /* isolate last 2 bits of address */
+ " xr %1,%2\n" /* make addr % 4 == 0 */
+ " sll %2,3\n"
+ " ar %0,%2\n" /* add alignement to bitnr */
#endif
- " lhi 1,31\n"
- " nr 1,%0\n" /* make shift value */
- " xr %0,1\n"
+ " lhi %2,31\n"
+ " nr %2,%0\n" /* make shift value */
+ " xr %0,%2\n"
" srl %0,3\n"
+ " lhi %3,1\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
- " lhi 2,1\n"
- " sll 2,0(1)\n"
- " x 2,%2\n" /* make AND mask */
+ " sll %3,0(%2)\n"
" l %0,0(%1)\n"
- "0: lr 1,%0\n" /* CS loop starts here */
- " nr 1,2\n" /* clear bit */
- " cs %0,1,0(%1)\n"
+ " x %3,%4\n" /* make AND mask */
+ "0: lr %2,%0\n" /* CS loop starts here */
+ " nr %2,%3\n" /* clear bit */
+ " cs %0,%2,0(%1)\n"
" jl 0b\n"
- " x 2,%2\n"
- " nr %0,2\n" /* isolate old bit */
- : "+a" (nr), "+a" (addr) : "m" (mask)
- : "cc", "memory", "1", "2" );
+ " x %3,%4\n"
+ " nr %0,%3\n" /* isolate old bit */
+ : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask)
+ : "m" (minusone) : "cc", "memory" );
return nr;
}
@@ -208,30 +213,31 @@
*/
static __inline__ int test_and_change_bit_cs(int nr, volatile void * addr)
{
+ unsigned long bits, mask;
__asm__ __volatile__(
#if ALIGN_CS == 1
- " lhi 1,3\n" /* CS must be aligned on 4 byte b. */
- " nr 1,%1\n" /* isolate last 2 bits of address */
- " xr %1,1\n" /* make addr % 4 == 0 */
- " sll 1,3\n"
- " ar %0,1\n" /* add alignement to bitnr */
+ " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
+ " nr %2,%1\n" /* isolate last 2 bits of address */
+ " xr %1,%2\n" /* make addr % 4 == 0 */
+ " sll %2,3\n"
+ " ar %0,%2\n" /* add alignement to bitnr */
#endif
- " lhi 1,31\n"
- " nr 1,%0\n" /* make shift value */
- " xr %0,1\n"
+ " lhi %2,31\n"
+ " nr %2,%0\n" /* make shift value */
+ " xr %0,%2\n"
" srl %0,3\n"
+ " lhi %3,1\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
- " lhi 2,1\n"
- " sll 2,0(1)\n" /* make OR mask */
+ " sll %3,0(%2)\n" /* make OR mask */
" l %0,0(%1)\n"
- "0: lr 1,%0\n" /* CS loop starts here */
- " xr 1,2\n" /* change bit */
- " cs %0,1,0(%1)\n"
+ "0: lr %2,%0\n" /* CS loop starts here */
+ " xr %2,%3\n" /* change bit */
+ " cs %0,%2,0(%1)\n"
" jl 0b\n"
- " nr %0,2\n" /* isolate old bit */
- : "+a" (nr), "+a" (addr) :
- : "cc", "memory", "1", "2" );
- return nr;
+ " nr %0,%3\n" /* isolate old bit */
+ : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
+ : "cc", "memory" );
+ return nr != 0;
}
#endif /* CONFIG_SMP */
@@ -240,17 +246,18 @@
*/
static __inline__ void __set_bit(int nr, volatile void * addr)
{
+ unsigned long reg1, reg2;
__asm__ __volatile__(
- " lhi 2,24\n"
- " lhi 1,7\n"
- " xr 2,%0\n"
- " nr 1,%0\n"
- " srl 2,3\n"
- " la 2,0(2,%1)\n"
- " la 1,0(1,%2)\n"
- " oc 0(1,2),0(1)"
- : : "r" (nr), "a" (addr), "a" (&_oi_bitmap)
- : "cc", "memory", "1", "2" );
+ " lhi %1,24\n"
+ " lhi %0,7\n"
+ " xr %1,%2\n"
+ " nr %0,%2\n"
+ " srl %1,3\n"
+ " la %1,0(%1,%3)\n"
+ " la %0,0(%0,%4)\n"
+ " oc 0(1,%1),0(%0)"
+ : "=&a" (reg1), "=&a" (reg2)
+ : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
}
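
__set_bit() is the non-atomic variant: it ORs a single byte in place with OC, taking the mask from the _oi_bitmap table indexed by nr & 7. The XR with 24 flips the byte index because s390 is big-endian: the byte holding bit nr sits at offset (nr ^ 24) >> 3 from addr. A C sketch; oi_map stands in for _oi_bitmap on the assumption that _oi_bitmap[i] == 1 << i, which is how the OC uses it:

static inline void __set_bit_sketch(int nr, volatile void *addr)
{
        static const unsigned char oi_map[8] =
                { 1, 2, 4, 8, 16, 32, 64, 128 };
        volatile unsigned char *b = (volatile unsigned char *) addr;

        b[(nr ^ 24) >> 3] |= oi_map[nr & 7];    /* "oc 0(1,%1),0(%0)" */
}

The __clear_bit() and __change_bit() hunks below apply the same operand renaming to NC with _ni_bitmap and XC with _oi_bitmap.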
static __inline__ void
@@ -319,17 +326,18 @@
static __inline__ void
__clear_bit(int nr, volatile void * addr)
{
+ unsigned long reg1, reg2;
__asm__ __volatile__(
- " lhi 2,24\n"
- " lhi 1,7\n"
- " xr 2,%0\n"
- " nr 1,%0\n"
- " srl 2,3\n"
- " la 2,0(2,%1)\n"
- " la 1,0(1,%2)\n"
- " nc 0(1,2),0(1)"
- : : "r" (nr), "a" (addr), "a" (&_ni_bitmap)
- : "cc", "memory", "1", "2" );
+ " lhi %1,24\n"
+ " lhi %0,7\n"
+ " xr %1,%2\n"
+ " nr %0,%2\n"
+ " srl %1,3\n"
+ " la %1,0(%1,%3)\n"
+ " la %0,0(%0,%4)\n"
+ " nc 0(1,%1),0(%0)"
+ : "=&a" (reg1), "=&a" (reg2)
+ : "r" (nr), "a" (addr), "a" (&_ni_bitmap) : "cc", "memory" );
}
static __inline__ void
@@ -397,17 +405,18 @@
*/
static __inline__ void __change_bit(int nr, volatile void * addr)
{
+ unsigned long reg1, reg2;
__asm__ __volatile__(
- " lhi 2,24\n"
- " lhi 1,7\n"
- " xr 2,%0\n"
- " nr 1,%0\n"
- " srl 2,3\n"
- " la 2,0(2,%1)\n"
- " la 1,0(1,%2)\n"
- " xc 0(1,2),0(1)"
- : : "r" (nr), "a" (addr), "a" (&_oi_bitmap)
- : "cc", "memory", "1", "2" );
+ " lhi %1,24\n"
+ " lhi %0,7\n"
+ " xr %1,%2\n"
+ " nr %0,%2\n"
+ " srl %1,3\n"
+ " la %1,0(%1,%3)\n"
+ " la %0,0(%0,%4)\n"
+ " xc 0(1,%1),0(%0)"
+ : "=&a" (reg1), "=&a" (reg2)
+ : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
}
static __inline__ void
@@ -475,24 +484,22 @@
*/
static __inline__ int test_and_set_bit_simple(int nr, volatile void * addr)
{
- static const int mask = 1;
+ unsigned long reg1, reg2;
int oldbit;
__asm__ __volatile__(
- " lhi 1,24\n"
- " lhi 2,7\n"
- " xr 1,%1\n"
- " nr 2,1\n"
- " srl 1,3\n"
- " la 1,0(1,%2)\n"
- " ic %0,0(1)\n"
- " srl %0,0(2)\n"
- " n %0,%4\n"
- " la 2,0(2,%3)\n"
- " oc 0(1,1),0(2)"
- : "=d&" (oldbit) : "r" (nr), "a" (addr),
- "a" (&_oi_bitmap), "m" (mask)
- : "cc", "memory", "1", "2" );
- return oldbit;
+ " lhi %1,24\n"
+ " lhi %2,7\n"
+ " xr %1,%3\n"
+ " nr %2,%3\n"
+ " srl %1,3\n"
+ " la %1,0(%1,%4)\n"
+ " ic %0,0(%1)\n"
+ " srl %0,0(%2)\n"
+ " la %2,0(%2,%5)\n"
+ " oc 0(1,%1),0(%2)"
+ : "=d&" (oldbit), "=&a" (reg1), "=&a" (reg2)
+ : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
+ return oldbit & 1;
}
#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
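
The simple test_and_* routines reuse the byte-table trick but first fetch the byte with IC and shift the old bit down to bit 0 ("srl %0,0(%2)"). The patch drops the in-asm AND against a constant 1 ("n %0,%4") and masks in C instead, hence return oldbit & 1. An equivalent sketch:

static inline int test_and_set_bit_simple_sketch(int nr, volatile void *addr)
{
        volatile unsigned char *b = (volatile unsigned char *) addr;
        unsigned long byte = (nr ^ 24) >> 3;    /* big-endian byte index */
        int oldbit = b[byte] >> (nr & 7);       /* "ic" + "srl %0,0(%2)" */

        b[byte] |= 1 << (nr & 7);               /* "oc 0(1,%1),0(%2)" */
        return oldbit & 1;
}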
@@ -501,25 +508,23 @@
*/
static __inline__ int test_and_clear_bit_simple(int nr, volatile void * addr)
{
- static const int mask = 1;
+ unsigned long reg1, reg2;
int oldbit;
__asm__ __volatile__(
- " lhi 1,24\n"
- " lhi 2,7\n"
- " xr 1,%1\n"
- " nr 2,1\n"
- " srl 1,3\n"
- " la 1,0(1,%2)\n"
- " ic %0,0(1)\n"
- " srl %0,0(2)\n"
- " n %0,%4\n"
- " la 2,0(2,%3)\n"
- " nc 0(1,1),0(2)"
- : "=d&" (oldbit) : "r" (nr), "a" (addr),
- "a" (&_ni_bitmap), "m" (mask)
- : "cc", "memory", "1", "2" );
- return oldbit;
+ " lhi %1,24\n"
+ " lhi %2,7\n"
+ " xr %1,%3\n"
+ " nr %2,%3\n"
+ " srl %1,3\n"
+ " la %1,0(%1,%4)\n"
+ " ic %0,0(%1)\n"
+ " srl %0,0(%2)\n"
+ " la %2,0(%2,%5)\n"
+ " nc 0(1,%1),0(%2)"
+ : "=d&" (oldbit), "=&a" (reg1), "=&a" (reg2)
+ : "r" (nr), "a" (addr), "a" (&_ni_bitmap) : "cc", "memory" );
+ return oldbit & 1;
}
#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
@@ -528,25 +533,23 @@
*/
static __inline__ int test_and_change_bit_simple(int nr, volatile void * addr)
{
- static const int mask = 1;
+ unsigned long reg1, reg2;
int oldbit;
__asm__ __volatile__(
- " lhi 1,24\n"
- " lhi 2,7\n"
- " xr 1,%1\n"
- " nr 2,1\n"
- " srl 1,3\n"
- " la 1,0(1,%2)\n"
- " ic %0,0(1)\n"
- " srl %0,0(2)\n"
- " n %0,%4\n"
- " la 2,0(2,%3)\n"
- " xc 0(1,1),0(2)"
- : "=d&" (oldbit) : "r" (nr), "a" (addr),
- "a" (&_oi_bitmap), "m" (mask)
- : "cc", "memory", "1", "2" );
- return oldbit;
+ " lhi %1,24\n"
+ " lhi %2,7\n"
+ " xr %1,%3\n"
+ " nr %2,%1\n"
+ " srl %1,3\n"
+ " la %1,0(%1,%4)\n"
+ " ic %0,0(%1)\n"
+ " srl %0,0(%2)\n"
+ " la %2,0(%2,%5)\n"
+ " xc 0(1,%1),0(%2)"
+ : "=d&" (oldbit), "=&a" (reg1), "=&a" (reg2)
+ : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
+ return oldbit & 1;
}
#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
@@ -573,22 +576,20 @@
static __inline__ int __test_bit(int nr, volatile void * addr)
{
- static const int mask = 1;
+ unsigned long reg1, reg2;
int oldbit;
__asm__ __volatile__(
- " lhi 2,24\n"
- " lhi 1,7\n"
- " xr 2,%1\n"
- " nr 1,%1\n"
- " srl 2,3\n"
- " ic %0,0(2,%2)\n"
- " srl %0,0(1)\n"
- " n %0,%3"
- : "=d&" (oldbit) : "r" (nr), "a" (addr),
- "m" (mask)
- : "cc", "1", "2" );
- return oldbit;
+ " lhi %2,24\n"
+ " lhi %1,7\n"
+ " xr %2,%3\n"
+ " nr %1,%3\n"
+ " srl %2,3\n"
+ " ic %0,0(%2,%4)\n"
+ " srl %0,0(%1)"
+ : "=d&" (oldbit), "=&a" (reg1), "=&a" (reg2)
+ : "r" (nr), "a" (addr) : "cc" );
+ return oldbit & 1;
}
static __inline__ int __constant_test_bit(int nr, volatile void * addr) {
@@ -605,48 +606,46 @@
*/
static __inline__ int find_first_zero_bit(void * addr, unsigned size)
{
- static const int mask = 0xffL;
+ unsigned long cmp, count;
int res;
if (!size)
return 0;
- __asm__(" lhi 0,-1\n"
- " lr 1,%1\n"
- " ahi 1,31\n"
- " srl 1,5\n"
- " sr 2,2\n"
- "0: c 0,0(2,%2)\n"
+ __asm__(" lhi %1,-1\n"
+ " lr %2,%3\n"
+ " slr %0,%0\n"
+ " ahi %2,31\n"
+ " srl %2,5\n"
+ "0: c %1,0(%0,%4)\n"
" jne 1f\n"
- " ahi 2,4\n"
- " brct 1,0b\n"
- " lr 2,%1\n"
+ " ahi %0,4\n"
+ " brct %2,0b\n"
+ " lr %0,%3\n"
" j 4f\n"
- "1: l 1,0(2,%2)\n"
- " sll 2,3\n"
- " tml 1,0xFFFF\n"
+ "1: l %2,0(%0,%4)\n"
+ " sll %0,3\n"
+ " lhi %1,0xff\n"
+ " tml %2,0xffff\n"
" jno 2f\n"
- " ahi 2,16\n"
- " srl 1,16\n"
- "2: tml 1,0x00FF\n"
+ " ahi %0,16\n"
+ " srl %2,16\n"
+ "2: tml %2,0x00ff\n"
" jno 3f\n"
- " ahi 2,8\n"
- " srl 1,8\n"
- "3: n 1,%3\n"
- " ic 1,0(1,%4)\n"
- " n 1,%3\n"
- " ar 2,1\n"
- "4: lr %0,2"
- : "=d" (res) : "a" (size), "a" (addr),
- "m" (mask), "a" (&_zb_findmap)
- : "cc", "0", "1", "2" );
+ " ahi %0,8\n"
+ " srl %2,8\n"
+ "3: nr %2,%1\n"
+ " ic %2,0(%2,%5)\n"
+ " alr %0,%2\n"
+ "4:"
+ : "=&a" (res), "=&d" (cmp), "=&a" (count)
+ : "a" (size), "a" (addr), "a" (&_zb_findmap) : "cc" );
return (res < size) ? res : size;
}
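
find_first_zero_bit() scans word by word for anything other than all ones, then narrows the position inside the hit word: TML on the low halfword, then on the low byte, and finally one lookup in _zb_findmap, a 256-entry table giving the index of the lowest zero bit in a byte (an assumption about its contents, but consistent with how the assembly indexes it). A portable sketch with a short loop in place of the table:

static inline int find_first_zero_bit_sketch(unsigned long *addr,
                                             unsigned size)
{
        unsigned words = (size + 31) >> 5, i, res;
        unsigned long w;

        for (i = 0; i < words; i++)
                if (addr[i] != ~0UL)
                        break;
        if (i == words)
                return size;                    /* no zero bit anywhere */
        w = addr[i];
        res = i << 5;
        if ((w & 0xffff) == 0xffff) {           /* "tml %2,0xffff" */
                res += 16;
                w >>= 16;
        }
        if ((w & 0x00ff) == 0x00ff) {           /* "tml %2,0x00ff" */
                res += 8;
                w >>= 8;
        }
        while (w & 1) {                         /* the _zb_findmap lookup */
                res++;
                w >>= 1;
        }
        return (res < size) ? res : size;
}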
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
- static const int mask = 0xffL;
unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long bitvec;
+ unsigned long bitvec, reg;
int set, bit = offset & 31, res;
if (bit) {
@@ -654,23 +653,21 @@
* Look for zero in first word
*/
bitvec = (*p) >> bit;
- __asm__(" lr 1,%1\n"
- " sr %0,%0\n"
- " tml 1,0xFFFF\n"
+ __asm__(" slr %0,%0\n"
+ " lhi %2,0xff\n"
+ " tml %1,0xffff\n"
" jno 0f\n"
" ahi %0,16\n"
- " srl 1,16\n"
- "0: tml 1,0x00FF\n"
+ " srl %1,16\n"
+ "0: tml %1,0x00ff\n"
" jno 1f\n"
" ahi %0,8\n"
- " srl 1,8\n"
- "1: n 1,%2\n"
- " ic 1,0(1,%3)\n"
- " n 1,%2\n"
- " ar %0,1"
- : "=d&" (set) : "d" (bitvec),
- "m" (mask), "a" (&_zb_findmap)
- : "cc", "1" );
+ " srl %1,8\n"
+ "1: nr %1,%2\n"
+ " ic %1,0(%1,%3)\n"
+ " alr %0,%1"
+ : "=&d" (set), "+a" (bitvec), "=&d" (reg)
+ : "a" (&_zb_findmap) : "cc" );
if (set < (32 - bit))
return set + offset;
offset += 32 - bit;
@@ -689,27 +686,24 @@
*/
static __inline__ unsigned long ffz(unsigned long word)
{
- static const int mask = 0xffL;
+ unsigned long reg;
int result;
- __asm__(" lr 1,%1\n"
- " sr %0,%0\n"
- " tml 1,0xFFFF\n"
+ __asm__(" slr %0,%0\n"
+ " lhi %2,0xff\n"
+ " tml %1,0xffff\n"
" jno 0f\n"
" ahi %0,16\n"
- " srl 1,16\n"
- "0: tml 1,0x00FF\n"
+ " srl %1,16\n"
+ "0: tml %1,0x00ff\n"
" jno 1f\n"
" ahi %0,8\n"
- " srl 1,8\n"
- "1: n 1,%2\n"
- " ic 1,0(1,%3)\n"
- " n 1,%2\n"
- " ar %0,1"
- : "=d&" (result) : "d" (word),
- "m" (mask), "a" (&_zb_findmap)
- : "cc", "1" );
-
+ " srl %1,8\n"
+ "1: nr %1,%2\n"
+ " ic %1,0(%1,%3)\n"
+ " alr %0,%1"
+ : "=&d" (result), "+a" (word), "=&d" (reg)
+ : "a" (&_zb_findmap) : "cc" );
return result;
}
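
ffz() is the same narrowing step as a standalone helper: two TML tests halve the search range, then the byte table finishes. A portable sketch, again with a loop standing in for _zb_findmap:

static inline unsigned long ffz_sketch(unsigned long word)
{
        unsigned long r = 0;

        if ((word & 0xffff) == 0xffff) { r += 16; word >>= 16; }
        if ((word & 0x00ff) == 0x00ff) { r += 8;  word >>= 8;  }
        while (word & 1) { r++; word >>= 1; }   /* first zero in the byte */
        return r;
}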
@@ -725,29 +719,28 @@
if (x == 0)
return 0;
- __asm__(" lr %%r1,%1\n"
- " sr %0,%0\n"
- " tml %%r1,0xFFFF\n"
+ __asm__(" slr %0,%0\n"
+ " tml %1,0xffff\n"
" jnz 0f\n"
" ahi %0,16\n"
- " srl %%r1,16\n"
- "0: tml %%r1,0x00FF\n"
+ " srl %1,16\n"
+ "0: tml %1,0x00ff\n"
" jnz 1f\n"
" ahi %0,8\n"
- " srl %%r1,8\n"
- "1: tml %%r1,0x000F\n"
+ " srl %1,8\n"
+ "1: tml %1,0x000f\n"
" jnz 2f\n"
" ahi %0,4\n"
- " srl %%r1,4\n"
- "2: tml %%r1,0x0003\n"
+ " srl %1,4\n"
+ "2: tml %1,0x0003\n"
" jnz 3f\n"
" ahi %0,2\n"
- " srl %%r1,2\n"
- "3: tml %%r1,0x0001\n"
+ " srl %1,2\n"
+ "3: tml %1,0x0001\n"
" jnz 4f\n"
" ahi %0,1\n"
"4:"
- : "=&d" (r) : "d" (x) : "cc", "1" );
+ : "=&d" (r), "+d" (x) : : "cc" );
return r+1;
}
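
This hunk is the ffs()-style routine (its signature lies outside the context lines): a pure binary search for the lowest set bit returning a 1-based index, with the patch turning x into an in/out operand ("+d" (x)) instead of hardcoding r1. Sketch:

static inline int ffs_sketch(unsigned int x)
{
        int r = 0;

        if (x == 0)
                return 0;
        if (!(x & 0xffff)) { r += 16; x >>= 16; }   /* "tml %1,0xffff" */
        if (!(x & 0x00ff)) { r += 8;  x >>= 8;  }
        if (!(x & 0x000f)) { r += 4;  x >>= 4;  }
        if (!(x & 0x0003)) { r += 2;  x >>= 2;  }
        if (!(x & 0x0001)) r += 1;
        return r + 1;
}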
@@ -778,40 +771,40 @@
#define ext2_test_bit(nr, addr) test_bit((nr)^24, addr)
static __inline__ int ext2_find_first_zero_bit(void *vaddr, unsigned size)
{
+ unsigned long cmp, count;
int res;
if (!size)
return 0;
- __asm__(" lhi 0,-1\n"
- " lr 1,%1\n"
- " ahi 1,31\n"
- " srl 1,5\n"
- " sr 2,2\n"
- "0: c 0,0(2,%2)\n"
+ __asm__(" lhi %1,-1\n"
+ " lr %2,%3\n"
+ " ahi %2,31\n"
+ " srl %2,5\n"
+ " slr %0,%0\n"
+ "0: cl %1,0(%0,%4)\n"
" jne 1f\n"
- " ahi 2,4\n"
- " brct 1,0b\n"
- " lr 2,%1\n"
+ " ahi %0,4\n"
+ " brct %2,0b\n"
+ " lr %0,%3\n"
" j 4f\n"
- "1: l 1,0(2,%2)\n"
- " sll 2,3\n"
- " lhi 0,0xff\n"
- " ahi 2,24\n"
- " tmh 1,0xFFFF\n"
+ "1: l %2,0(%0,%4)\n"
+ " sll %0,3\n"
+ " ahi %0,24\n"
+ " lhi %1,0xff\n"
+ " tmh %2,0xffff\n"
" jo 2f\n"
- " ahi 2,-16\n"
- " srl 1,16\n"
- "2: tml 1,0xFF00\n"
+ " ahi %0,-16\n"
+ " srl %2,16\n"
+ "2: tml %2,0xff00\n"
" jo 3f\n"
- " ahi 2,-8\n"
- " srl 1,8\n"
- "3: nr 1,0\n"
- " ic 1,0(1,%3)\n"
- " ar 2,1\n"
- "4: lr %0,2"
- : "=d" (res) : "a" (size), "a" (vaddr),
- "a" (&_zb_findmap)
- : "cc", "0", "1", "2" );
+ " ahi %0,-8\n"
+ " srl %2,8\n"
+ "3: nr %2,%1\n"
+ " ic %2,0(%2,%5)\n"
+ " alr %0,%2\n"
+ "4:"
+ : "=&a" (res), "=&d" (cmp), "=&a" (count)
+ : "a" (size), "a" (vaddr), "a" (&_zb_findmap) : "cc" );
return (res < size) ? res : size;
}
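
ext2 bitmaps are little-endian, so on big-endian s390 ext2_find_first_zero_bit() must flip byte positions after loading each 32-bit word; that is what the "ahi %0,24" and the later "ahi %0,-16" / "ahi %0,-8" corrections do ("jo" branches when all tested bits are ones, i.e. no zero bit in that half). The patch also changes the word compare from c (signed) to cl (logical), which makes no difference to a pure equality test but reads more naturally against all-ones. A byte-wise C sketch that sidesteps the endianness gymnastics:

static inline int ext2_find_first_zero_bit_sketch(void *vaddr, unsigned size)
{
        unsigned char *b = (unsigned char *) vaddr;
        unsigned nbytes = (size + 7) >> 3, i, res;

        for (i = 0; i < nbytes; i++)
                if (b[i] != 0xff)               /* byte with a zero bit */
                        break;
        if (i == nbytes)
                return size;
        res = i << 3;
        while (b[i] & (1 << (res & 7)))         /* LSB-first within a byte */
                res++;
        return (res < size) ? res : size;
}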
@@ -820,7 +813,7 @@
{
unsigned long *addr = vaddr;
unsigned long *p = addr + (offset >> 5);
- unsigned long word;
+ unsigned long word, reg;
int bit = offset & 31UL, res;
if (offset >= size)
@@ -835,21 +828,20 @@
word >>= bit;
res = bit;
/* Look for zero in first longword */
- __asm__(" lhi 0,0xff\n"
+ __asm__(" lhi %2,0xff\n"
" tml %1,0xffff\n"
- " jno 0f\n"
+ " jno 0f\n"
" ahi %0,16\n"
" srl %1,16\n"
"0: tml %1,0x00ff\n"
" jno 1f\n"
" ahi %0,8\n"
" srl %1,8\n"
- "1: nr %1,0\n"
- " ic %1,0(%1,%2)\n"
+ "1: nr %1,%2\n"
+ " ic %1,0(%1,%3)\n"
" alr %0,%1"
- : "+&d" (res), "+&a" (word)
- : "a" (&_zb_findmap)
- : "cc", "0" );
+ : "+&d" (res), "+&a" (word), "=&d" (reg)
+ : "a" (&_zb_findmap) : "cc" );
if (res < 32)
return (p - addr)*32 + res;
p++;