#ifndef _M68K_CHECKSUM_H
#define _M68K_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);

/*
 * the same as csum_partial, but copies from src while it
 * checksums
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */

extern __wsum csum_partial_copy_from_user(const void __user *src,
						void *dst,
						int len, __wsum sum,
						int *csum_err);

extern __wsum csum_partial_copy_nocheck(const void *src,
					void *dst, int len,
					__wsum sum);


#ifdef CONFIG_COLDFIRE

/*
 * The ColdFire cores don't support all the 68k instructions used
 * in the optimized checksum code below. So it reverts back to using
 * more standard C coded checksums. The fast checksum code is
 * significantly larger than the optimized version, so it is not
 * inlined here.
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl);

/*
 * Fold a 32-bit partial checksum down to 16 bits and complement it.
 * Plain C variant for ColdFire: two rounds of end-around carry.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp = (__force u32)sum;

	tmp = (tmp & 0xffff) + (tmp >> 16);
	tmp = (tmp & 0xffff) + (tmp >> 16);

	return (__force __sum16)~tmp;
}

#else

/*
 * This is a version of ip_fast_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum = 0;
	unsigned long tmp;

	/* dbra-loop over ihl 32-bit words, accumulating with carry
	 * (addxl), then fold the two 16-bit halves with addxw. */
	__asm__ ("subqw #1,%2\n"
		 "1:\t"
		 "movel %1@+,%3\n\t"
		 "addxl %3,%0\n\t"
		 "dbra %2,1b\n\t"
		 "movel %0,%3\n\t"
		 "swap %3\n\t"
		 "addxw %3,%0\n\t"
		 "clrw %3\n\t"
		 "addxw %3,%0\n\t"
		 : "=d" (sum), "=&a" (iph), "=&d" (ihl), "=&d" (tmp)
		 : "0" (sum), "1" (iph), "2" (ihl)
		 : "memory");
	return (__force __sum16)~sum;
}

/*
 * Fold a 32-bit partial checksum down to 16 bits and complement it.
 * Asm variant: swap + addw/addxw performs the end-around-carry fold.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp = (__force u32)sum;

	__asm__("swap %1\n\t"
		"addw %1, %0\n\t"
		"clrw %1\n\t"
		"addxw %1, %0"
		: "=&d" (sum), "=&d" (tmp)
		: "0" (sum), "1" (tmp));

	return (__force __sum16)~sum;
}

#endif /* CONFIG_COLDFIRE */

/*
 * Accumulate the IPv4 pseudo-header (saddr, daddr, len+proto) into sum
 * without folding; the trailing clrl/addxl pulls the final carry back in.
 */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	__asm__ ("addl %2,%0\n\t"
		 "addxl %3,%0\n\t"
		 "addxl %4,%0\n\t"
		 "clrl %1\n\t"
		 "addxl %1,%0"
		 : "=&d" (sum), "=d" (saddr)
		 : "g" (daddr), "1" (saddr), "d" (len + proto),
		   "0" (sum));
	return sum;
}


/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold (csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
/*
 * IPv6 pseudo-header checksum: sums the 4 words of each 128-bit address
 * plus len+proto with carry, then folds.
 *
 * NOTE(review): this asm dereferences *saddr/*daddr (%2@, %3@...) but
 * declares no "memory" clobber or memory input operands, unlike
 * ip_fast_csum() above — presumably safe because the addresses are never
 * written through, but worth confirming against compiler reordering.
 */
static __inline__ __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
		__u32 len, unsigned short proto, __wsum sum)
{
	register unsigned long tmp;
	__asm__("addl %2@,%0\n\t"
		"movel %2@(4),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %2@(8),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %2@(12),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@,%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(4),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(8),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(12),%1\n\t"
		"addxl %1,%0\n\t"
		"addxl %4,%0\n\t"
		"clrl %1\n\t"
		"addxl %1,%0"
		: "=&d" (sum), "=&d" (tmp)
		: "a" (saddr), "a" (daddr), "d" (len + proto),
		  "0" (sum));

	return csum_fold(sum);
}

#endif /* _M68K_CHECKSUM_H */
-148
arch/m68k/include/asm/checksum_mm.h
/*
 * Pre-merge MMU variant (arch/m68k/include/asm/checksum_mm.h) —
 * deleted by this commit in favor of the unified checksum.h.
 */
#ifndef _M68K_CHECKSUM_H
#define _M68K_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);

/*
 * the same as csum_partial, but copies from src while it
 * checksums
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */

extern __wsum csum_partial_copy_from_user(const void __user *src,
						void *dst,
						int len, __wsum sum,
						int *csum_err);

extern __wsum csum_partial_copy_nocheck(const void *src,
					void *dst, int len,
					__wsum sum);

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum = 0;
	unsigned long tmp;

	/* dbra-loop over ihl 32-bit header words with carry, then fold. */
	__asm__ ("subqw #1,%2\n"
		 "1:\t"
		 "movel %1@+,%3\n\t"
		 "addxl %3,%0\n\t"
		 "dbra %2,1b\n\t"
		 "movel %0,%3\n\t"
		 "swap %3\n\t"
		 "addxw %3,%0\n\t"
		 "clrw %3\n\t"
		 "addxw %3,%0\n\t"
		 : "=d" (sum), "=&a" (iph), "=&d" (ihl), "=&d" (tmp)
		 : "0" (sum), "1" (iph), "2" (ihl)
		 : "memory");
	return (__force __sum16)~sum;
}

/*
 * Fold a partial checksum
 */

static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp = (__force u32)sum;
	__asm__("swap %1\n\t"
		"addw %1, %0\n\t"
		"clrw %1\n\t"
		"addxw %1, %0"
		: "=&d" (sum), "=&d" (tmp)
		: "0" (sum), "1" (tmp));
	return (__force __sum16)~sum;
}


/*
 * Accumulate the IPv4 pseudo-header into sum without folding.
 */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	__asm__ ("addl %2,%0\n\t"
		 "addxl %3,%0\n\t"
		 "addxl %4,%0\n\t"
		 "clrl %1\n\t"
		 "addxl %1,%0"
		 : "=&d" (sum), "=d" (saddr)
		 : "g" (daddr), "1" (saddr), "d" (len + proto),
		   "0" (sum));
	return sum;
}


/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold (csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
/*
 * IPv6 pseudo-header checksum: sums both 128-bit addresses word by word
 * plus len+proto with carry, then folds.
 */
static __inline__ __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
		__u32 len, unsigned short proto, __wsum sum)
{
	register unsigned long tmp;
	__asm__("addl %2@,%0\n\t"
		"movel %2@(4),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %2@(8),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %2@(12),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@,%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(4),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(8),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(12),%1\n\t"
		"addxl %1,%0\n\t"
		"addxl %4,%0\n\t"
		"clrl %1\n\t"
		"addxl %1,%0"
		: "=&d" (sum), "=&d" (tmp)
		: "a" (saddr), "a" (daddr), "d" (len + proto),
		  "0" (sum));

	return csum_fold(sum);
}

#endif /* _M68K_CHECKSUM_H */
-132
arch/m68k/include/asm/checksum_no.h
/*
 * Pre-merge non-MMU variant (arch/m68k/include/asm/checksum_no.h) —
 * deleted by this commit in favor of the unified checksum.h.
 */
#ifndef _M68K_CHECKSUM_H
#define _M68K_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);

/*
 * the same as csum_partial, but copies from src while it
 * checksums
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */

__wsum csum_partial_copy_nocheck(const void *src, void *dst,
	int len, __wsum sum);


/*
 * the same as csum_partial_copy, but copies from user space.
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */

extern __wsum csum_partial_copy_from_user(const void __user *src,
	void *dst, int len, __wsum sum, int *csum_err);

__sum16 ip_fast_csum(const void *iph, unsigned int ihl);

/*
 * Fold a partial checksum
 */

static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp = (__force u32)sum;
#ifdef CONFIG_COLDFIRE
	/* ColdFire lacks the 68k addxw idiom: fold in plain C. */
	tmp = (tmp & 0xffff) + (tmp >> 16);
	tmp = (tmp & 0xffff) + (tmp >> 16);
	return (__force __sum16)~tmp;
#else
	/* NOTE(review): input "1" (sum) — the merged header uses
	 * "1" (tmp) here; both load the same value, but it leaves
	 * tmp's initializer effectively unused in this variant. */
	__asm__("swap %1\n\t"
		"addw %1, %0\n\t"
		"clrw %1\n\t"
		"addxw %1, %0"
		: "=&d" (sum), "=&d" (tmp)
		: "0" (sum), "1" (sum));
	return (__force __sum16)~sum;
#endif
}


/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */

/* NOTE(review): operand numbering differs from the mm/merged variant:
 * here daddr is tied to %0 ("0" (daddr)) and sum enters via "d"(sum)
 * (%5), whereas checksum_mm.h ties sum to %0 — verify equivalence. */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	__asm__ ("addl %1,%0\n\t"
		 "addxl %4,%0\n\t"
		 "addxl %5,%0\n\t"
		 "clrl %1\n\t"
		 "addxl %1,%0"
		 : "=&d" (sum), "=&d" (saddr)
		 : "0" (daddr), "1" (saddr), "d" (len + proto),
		   "d"(sum));
	return sum;
}

static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

extern __sum16 ip_compute_csum(const void *buff, int len);

#define _HAVE_ARCH_IPV6_CSUM
/*
 * IPv6 pseudo-header checksum: sums both 128-bit addresses word by word
 * plus len+proto with carry, then folds.
 */
static __inline__ __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
		__u32 len, unsigned short proto, __wsum sum)
{
	register unsigned long tmp;
	__asm__("addl %2@,%0\n\t"
		"movel %2@(4),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %2@(8),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %2@(12),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@,%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(4),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(8),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(12),%1\n\t"
		"addxl %1,%0\n\t"
		"addxl %4,%0\n\t"
		"clrl %1\n\t"
		"addxl %1,%0"
		: "=&d" (sum), "=&d" (tmp)
		: "a" (saddr), "a" (daddr), "d" (len + proto),
		  "0" (sum));

	return csum_fold(sum);
}

#endif /* _M68K_CHECKSUM_H */
+2-9
arch/m68knommu/lib/checksum.c
···9292 return result;9393}94949595+#ifdef CONFIG_COLDFIRE9596/*9697 * This is a version of ip_compute_csum() optimized for IP headers,9798 * which always checksum on 4 octet boundaries.···101100{102101 return (__force __sum16)~do_csum(iph,ihl*4);103102}103103+#endif104104105105/*106106 * computes the checksum of a memory block at buff, length len,···127125}128126129127EXPORT_SYMBOL(csum_partial);130130-131131-/*132132- * this routine is used for miscellaneous IP-like checksums, mainly133133- * in icmp.c134134- */135135-__sum16 ip_compute_csum(const void *buff, int len)136136-{137137- return (__force __sum16)~do_csum(buff,len);138138-}139128140129/*141130 * copy from fs while checksumming, otherwise like csum_partial