/*
 * csum_partial_copy - do IP checksumming and copy
 *
 * (C) Copyright 1996 Linus Torvalds
 * accelerated versions (and 21264 assembly versions) contributed by
 *	Rick Gorton <rick.gorton@alpha-processor.com>
 *
 * Don't look at this too closely - you'll go mad. The things
 * we do for performance..
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/uaccess.h>
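
/*
 * Wrappers around the Alpha byte-manipulation instructions.  ldq_u/stq_u
 * load and store the aligned quadword containing a (possibly unaligned)
 * address; extql/extqh, mskql/mskqh and insql/insqh extract, mask and
 * insert bytes so that unaligned data can be reassembled and merged a
 * whole quadword at a time.
 */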
#define ldq_u(x,y) \
__asm__ __volatile__("ldq_u %0,%1":"=r" (x):"m" (*(const unsigned long *)(y)))

#define stq_u(x,y) \
__asm__ __volatile__("stq_u %1,%0":"=m" (*(unsigned long *)(y)):"r" (x))

#define extql(x,y,z) \
__asm__ __volatile__("extql %1,%2,%0":"=r" (z):"r" (x),"r" (y))

#define extqh(x,y,z) \
__asm__ __volatile__("extqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))

#define mskql(x,y,z) \
__asm__ __volatile__("mskql %1,%2,%0":"=r" (z):"r" (x),"r" (y))

#define mskqh(x,y,z) \
__asm__ __volatile__("mskqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))

#define insql(x,y,z) \
__asm__ __volatile__("insql %1,%2,%0":"=r" (z):"r" (x),"r" (y))

#define insqh(x,y,z) \
__asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
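
/*
 * User-space counterparts of the above loads and stores: like
 * __get_user()/__put_user(), but operating on whole quadwords via
 * ldq_u/stq_u, with an exception-table fixup so that a faulting
 * access yields an error value instead of an oops.
 */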
#define __get_user_u(x,ptr) \
	__asm__ __volatile__( \
		: "=r"(x), "=r"(__guu_err) \
		: "m"(__m(ptr)), "1"(0)); \

#define __put_user_u(x,ptr) \
	__asm__ __volatile__( \
		: "m"(__m(addr)), "rJ"(x), "0"(0)); \
static inline unsigned short from64to16(unsigned long x)
{
	/* Using extract instructions is a bit more efficient
	   than the original shift/bitmask version.  */
	union {
		unsigned long	ul;
		unsigned int	ui[2];
		unsigned short	us[4];
	} in_v, tmp_v, out_v;

	in_v.ul = x;
	tmp_v.ul = (unsigned long) in_v.ui[0] + (unsigned long) in_v.ui[1];

	/* Since the bits of tmp_v.us[3] are going to always be zero,
	   we don't have to bother to add that in.  */
	out_v.ul = (unsigned long) tmp_v.us[0] + (unsigned long) tmp_v.us[1]
			+ (unsigned long) tmp_v.us[2];

	/* Similarly, out_v.us[2] is always zero for the final add.  */
	return out_v.us[0] + out_v.us[1];
}
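
/*
 * For reference, the "shift/bitmask version" mentioned above would fold
 * the sum roughly like this (illustrative sketch only, not used here):
 *
 *	x = (x & 0xffffffff) + (x >> 32);
 *	x = (x & 0xffff) + (x >> 16);
 *	x = (x & 0xffff) + (x >> 16);
 *	x = (x & 0xffff) + (x >> 16);
 *	return x;
 */
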
/*
 * Ok. This isn't fun, but this is the EASY case:
 * both source and destination are 8-byte aligned.
 */
static inline unsigned long
csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
			 long len, unsigned long checksum,
			 int *errp)
{
	unsigned long carry = 0;
	err |= __get_user(word, src);
	carry = checksum < word;
	unsigned long word, tmp;
	err |= __get_user(word, src);
	mskql(word, len, word);
	mskqh(tmp, len, tmp);
	carry = checksum < word;
	if (err && errp) *errp = err;
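
/*
 * All of the copy loops in these helpers track overflow the same way:
 * after "checksum += word", the expression "checksum < word" is 1
 * exactly when the 64-bit addition wrapped, and that carry is added
 * back into the checksum on the following step.
 */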

/*
 * This is even less fun, but this is still reasonably
 * easy: the destination is 8-byte aligned, the source is not.
 */
static inline unsigned long
csum_partial_cfu_dest_aligned(const unsigned long __user *src,
			      unsigned long *dst,
			      unsigned long soff,
			      long len, unsigned long checksum,
			      int *errp)
{
	unsigned long word, carry;
	unsigned long lastsrc = 7+len+(unsigned long)src;
	err |= __get_user_u(first,src);
	unsigned long second;
	err |= __get_user_u(second, src+1);
	extql(first, soff, word);
	extqh(second, soff, first);
	carry = checksum < word;
	unsigned long second;
	err |= __get_user_u(second, lastsrc);
	extql(first, soff, word);
	extqh(second, soff, first);
	mskql(word, len, word);
	mskqh(tmp, len, tmp);
	carry = checksum < word;
	if (err && errp) *errp = err;

/*
 * This is slightly less fun than the above.. Here the source
 * is 8-byte aligned but the destination is not.
 */
static inline unsigned long
csum_partial_cfu_src_aligned(const unsigned long __user *src,
			     unsigned long *dst,
			     unsigned long doff,
			     long len, unsigned long checksum,
			     unsigned long partial_dest,
			     int *errp)
{
	unsigned long carry = 0;
	unsigned long second_dest;
	mskql(partial_dest, doff, partial_dest);
	err |= __get_user(word, src);
	insql(word, doff, second_dest);
	stq_u(partial_dest | second_dest, dst);
	insqh(word, doff, partial_dest);
	carry = checksum < word;
	err |= __get_user(word, src);
	mskql(word, len, word);
	insql(word, doff, second_dest);
	carry = checksum < word;
	partial_dest |= second_dest;
	stq_u(partial_dest, dst);
	insqh(word, doff, partial_dest);
	ldq_u(second_dest, dst);
	mskqh(second_dest, doff, second_dest);
	stq_u(partial_dest | second_dest, dst);
	if (err && errp) *errp = err;

/*
 * This is so totally un-fun that it's frightening. Don't
 * look at this too closely, you'll go blind. Neither the
 * source nor the destination is 8-byte aligned here.
 */
static inline unsigned long
csum_partial_cfu_unaligned(const unsigned long __user * src,
			   unsigned long * dst,
			   unsigned long soff, unsigned long doff,
			   long len, unsigned long checksum,
			   unsigned long partial_dest,
			   int *errp)
{
	unsigned long carry = 0;
	unsigned long lastsrc;
	err |= __get_user_u(first, src);
	lastsrc = 7+len+(unsigned long)src;
	mskql(partial_dest, doff, partial_dest);
	unsigned long second, word;
	unsigned long second_dest;
	err |= __get_user_u(second, src+1);
	extql(first, soff, word);
	extqh(second, soff, first);
	insql(word, doff, second_dest);
	stq_u(partial_dest | second_dest, dst);
	carry = checksum < word;
	insqh(word, doff, partial_dest);
	unsigned long second, word;
	unsigned long second_dest;
	err |= __get_user_u(second, lastsrc);
	extql(first, soff, word);
	extqh(second, soff, first);
	mskql(word, len-doff, word);
	insql(word, doff, second_dest);
	carry = checksum < word;
	stq_u(partial_dest | second_dest, dst);
	ldq_u(second_dest, dst+1);
	insqh(word, doff, partial_dest);
	mskqh(second_dest, len, second_dest);
	stq_u(partial_dest | second_dest, dst+1);
	unsigned long second, word;
	unsigned long second_dest;
	err |= __get_user_u(second, lastsrc);
	extql(first, soff, word);
	extqh(second, soff, first);
	ldq_u(second_dest, dst);
	mskql(word, len-doff, word);
	mskqh(second_dest, len, second_dest);
	carry = checksum < word;
	insql(word, doff, word);
	stq_u(partial_dest | word | second_dest, dst);
	if (err && errp) *errp = err;
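
/*
 * Top-level entry point: pick one of the four helpers above based on
 * the low three bits of the source and destination pointers (both
 * aligned, only destination aligned, only source aligned, or neither).
 */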
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
			    __wsum sum, int *errp)
{
	unsigned long checksum = (__force u32) sum;
	unsigned long soff = 7 & (unsigned long) src;
	unsigned long doff = 7 & (unsigned long) dst;

	if (len) {
		if (!access_ok(VERIFY_READ, src, len)) {
			if (errp) *errp = -EFAULT;
			memset(dst, 0, len);
			return sum;
		}
		if (!doff) {
			if (!soff)
				checksum = csum_partial_cfu_aligned(
					(const unsigned long __user *) src,
					(unsigned long *) dst,
					len-8, checksum, errp);
			else
				checksum = csum_partial_cfu_dest_aligned(
					(const unsigned long __user *) src,
					(unsigned long *) dst,
					soff, len-8, checksum, errp);
		} else {
			unsigned long partial_dest;
			ldq_u(partial_dest, dst);
			if (!soff)
				checksum = csum_partial_cfu_src_aligned(
					(const unsigned long __user *) src,
					(unsigned long *) dst,
					doff, len-8, checksum,
					partial_dest, errp);
			else
				checksum = csum_partial_cfu_unaligned(
					(const unsigned long __user *) src,
					(unsigned long *) dst,
					soff, doff, len-8, checksum,
					partial_dest, errp);
		}
		checksum = from64to16 (checksum);
	}
	return (__force __wsum)checksum;
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
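
/*
 * Kernel-to-kernel variant: temporarily lift the user address limit
 * with set_fs(KERNEL_DS) so the copy-from-user path above can be
 * reused for kernel buffers.
 */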
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
	__wsum checksum;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	checksum = csum_partial_copy_from_user((__force const void __user *)src,
						dst, len, sum, NULL);
	set_fs(oldfs);
	return checksum;
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);