···3030 return sum;3131}32323333-/*3434- * Computes the checksum of a memory block at buff, length len,3535- * and adds in "sum" (32-bit).3636- *3737- * Returns a 32-bit number suitable for feeding into itself3838- * or csum_tcpudp_magic.3939- *4040- * This function must be called with even lengths, except4141- * for the last fragment, which may be odd.4242- *4343- * It's best to have buff aligned on a 32-bit boundary.4444- */4545-static inline __wsum csum_partial(const void *buff, int len, __wsum sum)4646-{4747- return cksm(buff, len, sum);4848-}3333+__wsum csum_partial(const void *buff, int len, __wsum sum);49345035/*5136 * Fold a partial checksum without adding pseudo headers.
// SPDX-License-Identifier: GPL-2.0

#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/fpu.h>

/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit).
 *
 * Returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic.
 *
 * This function must be called with even lengths, except
 * for the last fragment, which may be odd.
 *
 * It's best to have buff aligned on a 64-bit boundary.
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	/* On-stack save area for the vector registers clobbered below. */
	DECLARE_KERNEL_FPU_ONSTACK8(vxstate);

	/* No vector facility: fall back to the scalar cksm() helper. */
	if (!cpu_has_vx())
		return cksm(buff, len, sum);
	/* Claim vector registers V16-V23 for in-kernel use. */
	kernel_fpu_begin(&vxstate, KERNEL_VXR_V16V23);
	/* Seed accumulator V16 (element 1) with the incoming sum. */
	fpu_vlvgf(16, (__force u32)sum, 1);
	/* V17-V19 are additional accumulators, starting at zero. */
	fpu_vzero(17);
	fpu_vzero(18);
	fpu_vzero(19);
	/*
	 * Main loop: 64 bytes per iteration. Four 16-byte vectors are
	 * folded into four independent accumulators, so the VCKSM
	 * operations do not all serialize on a single register —
	 * presumably to expose instruction-level parallelism.
	 */
	while (len >= 64) {
		fpu_vlm(20, 23, buff);
		fpu_vcksm(16, 20, 16);
		fpu_vcksm(17, 21, 17);
		fpu_vcksm(18, 22, 18);
		fpu_vcksm(19, 23, 19);
		buff += 64;
		len -= 64;
	}
	/* Remaining 32-byte chunk, if any (loop runs at most once). */
	while (len >= 32) {
		fpu_vlm(20, 21, buff);
		fpu_vcksm(16, 20, 16);
		fpu_vcksm(17, 21, 17);
		buff += 32;
		len -= 32;
	}
	/* Remaining 16-byte chunk, if any (loop runs at most once). */
	while (len >= 16) {
		fpu_vl(20, buff);
		fpu_vcksm(16, 20, 16);
		buff += 16;
		len -= 16;
	}
	/*
	 * Tail of 1-15 bytes: load exactly len bytes (vll takes the
	 * highest byte index, hence len - 1) and fold them in.
	 */
	if (len) {
		fpu_vll(20, len - 1, buff);
		fpu_vcksm(16, 20, 16);
	}
	/* Fold the four partial accumulators down into V16. */
	fpu_vcksm(18, 19, 18);
	fpu_vcksm(16, 17, 16);
	fpu_vcksm(16, 18, 16);
	/* Extract the 32-bit result from element 1 of V16. */
	sum = (__force __wsum)fpu_vlgvf(16, 1);
	/* Restore the clobbered vector registers. */
	kernel_fpu_end(&vxstate, KERNEL_VXR_V16V23);
	return sum;
}
EXPORT_SYMBOL(csum_partial);