2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
--- a/kernel/include/net/checksum.h
+++ b/kernel/include/net/checksum.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * INET		An implementation of the TCP/IP protocol suite for the LINUX
  *		operating system.  INET is implemented using the  BSD Socket
@@ -9,11 +10,6 @@
  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  *		Borrows very liberally from tcp.c and ip.c, see those
  *		files for more names.
- *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		as published by the Free Software Foundation; either version
- *		2 of the License, or (at your option) any later version.
  */
 
 #ifndef _CHECKSUM_H
@@ -26,39 +22,39 @@
 #include <asm/checksum.h>
 
 #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
-static inline
+static __always_inline
 __wsum csum_and_copy_from_user (const void __user *src, void *dst,
-				      int len, __wsum sum, int *err_ptr)
+				      int len)
 {
-	if (access_ok(VERIFY_READ, src, len))
-		return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
-
-	if (len)
-		*err_ptr = -EFAULT;
-
-	return sum;
+	if (copy_from_user(dst, src, len))
+		return 0;
+	return csum_partial(dst, len, ~0U);
 }
 #endif
 
 #ifndef HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user
-(const void *src, void __user *dst, int len, __wsum sum, int *err_ptr)
+static __always_inline __wsum csum_and_copy_to_user
+(const void *src, void __user *dst, int len)
 {
-	sum = csum_partial(src, len, sum);
+	__wsum sum = csum_partial(src, len, ~0U);
 
-	if (access_ok(VERIFY_WRITE, dst, len)) {
-		if (copy_to_user(dst, src, len) == 0)
-			return sum;
-	}
-	if (len)
-		*err_ptr = -EFAULT;
+	if (copy_to_user(dst, src, len) == 0)
+		return sum;
+	return 0;
+}
+#endif
 
-	return (__force __wsum)-1; /* invalid checksum */
+#ifndef _HAVE_ARCH_CSUM_AND_COPY
+static __always_inline __wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+	memcpy(dst, src, len);
+	return csum_partial(dst, len, 0);
 }
 #endif
 
 #ifndef HAVE_ARCH_CSUM_ADD
-static inline __wsum csum_add(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
 {
 	u32 res = (__force u32)csum;
 	res += (__force u32)addend;
@@ -66,12 +62,12 @@
 }
 #endif
 
-static inline __wsum csum_sub(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
 {
 	return csum_add(csum, ~addend);
 }
 
-static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
 {
 	u16 res = (__force u16)csum;
 
@@ -79,12 +75,12 @@
 	return (__force __sum16)(res + (res < (__force u16)addend));
 }
 
-static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
 {
 	return csum16_add(csum, ~addend);
 }
 
-static inline __wsum
+static __always_inline __wsum
 csum_block_add(__wsum csum, __wsum csum2, int offset)
 {
 	u32 sum = (__force u32)csum2;
@@ -96,36 +92,37 @@
 	return csum_add(csum, (__force __wsum)sum);
 }
 
-static inline __wsum
+static __always_inline __wsum
 csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
 {
 	return csum_block_add(csum, csum2, offset);
 }
 
-static inline __wsum
+static __always_inline __wsum
 csum_block_sub(__wsum csum, __wsum csum2, int offset)
 {
 	return csum_block_add(csum, ~csum2, offset);
 }
 
-static inline __wsum csum_unfold(__sum16 n)
+static __always_inline __wsum csum_unfold(__sum16 n)
 {
 	return (__force __wsum)n;
 }
 
-static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
+static __always_inline
+__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
 {
 	return csum_partial(buff, len, sum);
 }
 
 #define CSUM_MANGLED_0 ((__force __sum16)0xffff)
 
-static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
 {
 	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
 }
 
-static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
+static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
 {
 	__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
 
@@ -138,7 +135,7 @@
  * m : old value of a 16bit field
  * m' : new value of a 16bit field
  */
-static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
+static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
 {
 	*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
 }
@@ -157,16 +154,16 @@
 void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
 				     __wsum diff, bool pseudohdr);
 
-static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
-					    __be16 from, __be16 to,
-					    bool pseudohdr)
+static __always_inline
+void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
+			      __be16 from, __be16 to, bool pseudohdr)
 {
 	inet_proto_csum_replace4(sum, skb, (__force __be32)from,
 				 (__force __be32)to, pseudohdr);
 }
 
-static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
-				    int start, int offset)
+static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
+					     int start, int offset)
 {
 	__sum16 *psum = (__sum16 *)(ptr + offset);
 	__wsum delta;
@@ -182,7 +179,7 @@
 	return delta;
 }
 
-static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
+static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
 {
 	*psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
 }