patch-2.1.44 linux/include/asm-mips/checksum.h
- Lines: 245
- Date: Mon Jul 7 08:18:55 1997
- Orig file: v2.1.43/linux/include/asm-mips/checksum.h
- Orig date: Sun Nov 3 01:04:41 1996
diff -u --recursive --new-file v2.1.43/linux/include/asm-mips/checksum.h linux/include/asm-mips/checksum.h
@@ -31,15 +31,44 @@
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);
+unsigned int csum_partial_copy(const char *src, char *dst, int len, unsigned int sum);
/*
- * the same as csum_partial, but copies from user space (but on the alpha
+ * the same as csum_partial, but copies from user space (but on MIPS
* we have just one address space, so this is identical to the above)
+ *
+ * this is obsolete and will go away.
*/
#define csum_partial_copy_fromuser csum_partial_copy
/*
+ * this is a new version of the above that records errors it finds in *errp,
+ * but continues and zeros the rest of the buffer.
+ */
+unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, unsigned int sum, int *errp);
+
+/*
+ * Fold a partial checksum without adding pseudo headers
+ */
+static inline unsigned short int csum_fold(unsigned int sum)
+{
+ __asm__("
+ .set noat
+ sll $1,%0,16
+ addu %0,$1
+ sltu $1,%0,$1
+ srl %0,%0,16
+ addu %0,$1
+ xori %0,0xffff
+ .set at"
+ : "=r" (sum)
+ : "0" (sum)
+ : "$1");
+
+ return sum;
+}
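
A sketch for readers following the hunk, not part of the patch: the new
csum_fold() above folds a 32-bit partial sum into the final 16-bit one's
complement checksum. A rough portable C equivalent of what the MIPS assembly
computes, with the _ref name invented for illustration, is:

	/* Illustrative only -- not part of this patch. */
	static inline unsigned short csum_fold_ref(unsigned int sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* add high and low halves */
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the possible carry */
		return (unsigned short) ~sum;		/* one's complement, low 16 bits */
	}
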
+
+/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
@@ -49,54 +78,48 @@
static inline unsigned short ip_fast_csum(unsigned char * iph,
unsigned int ihl)
{
- unsigned short int sum;
- unsigned long dummy1, dummy2;
+ unsigned int sum;
+ unsigned long dummy;
/*
- * This is optimized for 32-bit MIPS processors.
- * I tried it in plain C but the generated code looks to bad to
- * use with old first generation MIPS CPUs.
- * Using 64-bit code could even further improve these routines.
+ * This is for 32-bit MIPS processors.
*/
- __asm__("
+ __asm__ __volatile__("
.set noreorder
.set noat
- lw %0,(%3)
- subu %1,4
- blez %1,2f
- sll %1,%4,2 # delay slot
- lw %2,4(%3)
- addu %1,%3 # delay slot
- addu %0,%2
- sltu $1,%0,%2
- lw %2,8(%3)
+ lw %0,(%1)
+ subu %2,4
+ #blez %2,2f
+ sll %2,2 # delay slot
+
+ lw %3,4(%1)
+ addu %2,%1 # delay slot
+ addu %0,%3
+ sltu $1,%0,%3
+ lw %3,8(%1)
addu %0,$1
- addu %0,%2
- sltu $1,%0,%2
- lw %2,12(%3)
+ addu %0,%3
+ sltu $1,%0,%3
+ lw %3,12(%1)
addu %0,$1
- addu %0,%2
- sltu $1,%0,%2
+ addu %0,%3
+ sltu $1,%0,%3
addu %0,$1
-1: lw %2,16(%3)
- addu %1,4
- addu %0,%2
- sltu $1,%0,%2
- bne %1,%3,1b
+
+1: lw %3,16(%1)
+ addiu %1,4
+ addu %0,%3
+ sltu $1,%0,%3
+ bne %2,%1,1b
addu %0,$1 # delay slot
- srl $1,%0,16
- addu %0,$1
- sltu $1,%0,$1
- addu %0,$1
- nor %0,$0,%0
- andi %0,0xffff
+
2: .set at
.set reorder"
- : "=r" (sum), "=r" (dummy1), "=r" (dummy2)
- : "r" (iph), "r"(ihl)
+ : "=&r" (sum), "=&r" (iph), "=&r" (ihl), "=&r" (dummy)
+ : "1" (iph), "2" (ihl)
: "$1");
- return sum;
+ return csum_fold(sum);
}
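
As a sketch only, not part of the patch: the rewritten loop above accumulates
the standard one's complement sum over the ihl 32-bit words of the IP header
and now defers the final fold to csum_fold(). A naive portable version, with
the _ref name invented for illustration and the header assumed to be 32-bit
aligned as the comment requires, could read:

	/* Illustrative only -- not part of this patch. */
	static inline unsigned short ip_fast_csum_ref(const unsigned char *iph,
						      unsigned int ihl)
	{
		const unsigned int *p = (const unsigned int *) iph;
		unsigned int sum = 0, w, i;

		for (i = 0; i < ihl; i++) {	/* ihl counts 32-bit words */
			w = p[i];
			sum += w;
			if (sum < w)		/* 32-bit overflow: end-around carry */
				sum++;
		}
		return csum_fold(sum);		/* fold to 16 bits and complement */
	}
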
/*
@@ -114,66 +137,38 @@
addu %0,%2
sltu $1,%0,%2
addu %0,$1
+
addu %0,%3
sltu $1,%0,%3
addu %0,$1
+
addu %0,%4
sltu $1,%0,%4
addu %0,$1
- srl $1,%0,16
- addu %0,$1
- sltu $1,%0,$1
- addu %0,$1
- nor %0,$0,%0
- andi %0,0xffff
.set at"
: "=r" (sum)
- : "0" (daddr), "r"(saddr), "r"((ntohs(len)<<16)+proto*256), "r"(sum)
+ : "0" (daddr), "r"(saddr),
+#ifdef __MIPSEL__
+ "r" ((ntohs(len)<<16)+proto*256),
+#else
+ "r" (((proto)<<16)+len),
+#endif
+ "r"(sum)
: "$1");
- return (unsigned short)sum;
+ return csum_fold(sum);
}
/*
- * Fold a partial checksum without adding pseudo headers
- */
-static inline unsigned short int csum_fold(unsigned int sum)
-{
- __asm__("
- .set noat
- srl $1,%0,16
- addu %0,$1
- sltu $1,%0,$1
- nor %0,$0,%0
- andi %0,0xffff
- .set at"
- : "=r"(sum)
- : "0" (sum)
- : "$1");
-
- return sum;
-}
-
-/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
- unsigned short int sum;
-
- __asm__("
- .set noat
- srl $1,%0,16
- addu %0,$1
- sltu $1,%0,$1
- nor %0,$0,%0
- andi %0,0xffff
- .set at"
- : "=r"(sum)
- : "r" (csum_partial(buff, len, 0))
- : "$1");
+static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+{
+ unsigned int sum;
- return sum;
+ sum = csum_partial(buff, len, 0);
+ return csum_fold(sum);
}
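
Both the TCP/UDP pseudo-header helper in this header (csum_tcpudp_magic()) and
ip_compute_csum() now end in csum_fold() instead of repeating the folding
sequence inline. As a sketch only, not part of the patch and with the function
name and parameters invented for the example, a caller checking a received TCP
segment would combine the helpers like this:

	/* Illustrative only -- not part of this patch. */
	static inline int tcp_csum_ok_ref(unsigned long saddr, unsigned long daddr,
					  unsigned char *th, unsigned short len)
	{
		unsigned int sum = csum_partial(th, len, 0);

		/* A correct segment folds to zero once the pseudo header is added. */
		return csum_tcpudp_magic(saddr, daddr, len, 6 /* IPPROTO_TCP */, sum) == 0;
	}
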
#define _HAVE_ARCH_IPV6_CSUM
@@ -183,9 +178,7 @@
unsigned short proto,
unsigned int sum)
{
- unsigned long scratch;
-
- __asm__("
+ __asm__("
.set noreorder
.set noat
addu %0,%5 # proto (long in network byte order)
@@ -234,14 +227,13 @@
addu %0,%1
sltu $1,%0,$1
.set noat
- .set noreorder
- "
- : "=r" (sum),
- "=r" (scratch)
- : "r" (saddr),
+ .set noreorder"
+ : "=r" (sum),
+ "=r" (proto)
+ : "r" (saddr),
"r" (daddr),
- "0" (htonl((__u32) (len))),
- "r" (htonl(proto)),
+ "0" (htonl((__u32) (len))),
+ "1" (htonl(proto)),
"r"(sum)
: "$1");