@@ -317,7 +317,7 @@
  * @sk_tskey: counter to disambiguate concurrent tstamp requests
  * @sk_zckey: counter to order MSG_ZEROCOPY notifications
  * @sk_socket: Identd and reporting IO signals
- * @sk_user_data: RPC layer private data
+ * @sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock.
  * @sk_frag: cached page frag
  * @sk_peek_off: current peek_offset value
  * @sk_send_head: front of stuff to transmit
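The kernel-doc change above documents an existing locking rule rather than altering behaviour: writers of sk->sk_user_data are expected to hold sk->sk_callback_lock. A minimal sketch of that writer pattern, with a hypothetical attach helper (sk_callback_lock is an rwlock_t; BH is disabled because readers can run from softirq context):

    /* Hypothetical writer: publish private data under the callback lock. */
    static void example_attach_user_data(struct sock *sk, void *priv)
    {
            write_lock_bh(&sk->sk_callback_lock);
            sk->sk_user_data = priv;
            write_unlock_bh(&sk->sk_callback_lock);
    }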
@@ -1092,8 +1092,12 @@
 	 * OR an additional socket flag
 	 * [1] : sk_state and sk_prot are in the same cache line.
 	 */
-	if (sk->sk_state == TCP_ESTABLISHED)
-		sock_rps_record_flow_hash(sk->sk_rxhash);
+	if (sk->sk_state == TCP_ESTABLISHED) {
+		/* This READ_ONCE() is paired with the WRITE_ONCE()
+		 * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+		 */
+		sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+	}
 	}
 #endif
 }
@@ -1102,15 +1106,19 @@
 					const struct sk_buff *skb)
 {
 #ifdef CONFIG_RPS
-	if (unlikely(sk->sk_rxhash != skb->hash))
-		sk->sk_rxhash = skb->hash;
+	/* The following WRITE_ONCE() is paired with the READ_ONCE()
+	 * here, and another one in sock_rps_record_flow().
+	 */
+	if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
+		WRITE_ONCE(sk->sk_rxhash, skb->hash);
 #endif
 }
 
 static inline void sock_rps_reset_rxhash(struct sock *sk)
 {
 #ifdef CONFIG_RPS
-	sk->sk_rxhash = 0;
+	/* Paired with READ_ONCE() in sock_rps_record_flow() */
+	WRITE_ONCE(sk->sk_rxhash, 0);
 #endif
 }
 
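The two hunks above annotate a deliberately lockless field: sk->sk_rxhash is written from the receive path and read from sock_rps_record_flow() with no lock held, so every access is wrapped in READ_ONCE()/WRITE_ONCE() to mark the race as intentional and keep the compiler from tearing, fusing, or re-loading the accesses. A stripped-down sketch of the same pattern, using hypothetical names:

    /* One lockless writer, one lockless reader, both annotated. */
    struct flow { u32 rxhash; };

    static void flow_save_hash(struct flow *f, u32 hash)
    {
            if (READ_ONCE(f->rxhash) != hash)       /* avoid dirtying the cache line */
                    WRITE_ONCE(f->rxhash, hash);
    }

    static u32 flow_record_hash(const struct flow *f)
    {
            return READ_ONCE(f->rxhash);            /* paired with the WRITE_ONCE() above */
    }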
@@ -1240,6 +1248,7 @@
 /*
  * Pressure flag: try to collapse.
  * Technical note: it is used by multiple contexts non atomically.
+ * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
  * All the __sk_mem_schedule() is of this nature: accounting
  * is strict, actions are advisory and have some latency.
  */
@@ -1353,6 +1362,12 @@
 	return sk->sk_prot->memory_pressure != NULL;
 }
 
+static inline bool sk_under_global_memory_pressure(const struct sock *sk)
+{
+	return sk->sk_prot->memory_pressure &&
+		!!READ_ONCE(*sk->sk_prot->memory_pressure);
+}
+
 static inline bool sk_under_memory_pressure(const struct sock *sk)
 {
 	if (!sk->sk_prot->memory_pressure)
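The new helper differs from sk_under_memory_pressure() just below it: it consults only the protocol-wide flag (e.g. tcp_memory_pressure) and deliberately ignores the socket's memcg state, so a caller can tell global pressure apart from cgroup-local pressure. A hedged sketch of a plausible caller (the function name and the reclaim policy are illustrative):

    /* Hypothetical reclaim decision: release cached forward-allocated
     * pages only when the protocol as a whole is under pressure;
     * memcg-local pressure alone is not a reason to give pages back.
     */
    static void example_maybe_reclaim(struct sock *sk, int amount)
    {
            if (sk_under_global_memory_pressure(sk))
                    __sk_mem_reclaim(sk, amount);
    }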
@@ -1362,7 +1377,7 @@
 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 		return true;
 
-	return !!*sk->sk_prot->memory_pressure;
+	return !!READ_ONCE(*sk->sk_prot->memory_pressure);
 }
 
 static inline long
@@ -1416,7 +1431,7 @@
 {
 	if (!prot->memory_pressure)
 		return false;
-	return !!*prot->memory_pressure;
+	return !!READ_ONCE(*prot->memory_pressure);
 }
 
 
@@ -1796,7 +1811,12 @@
  * Default socket callbacks and setup code
  */
 
-/* Initialise core socket variables */
+/* Initialise core socket variables using an explicit uid. */
+void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid);
+
+/* Initialise core socket variables.
+ * Assumes struct socket *sock is embedded in a struct socket_alloc.
+ */
 void sock_init_data(struct socket *sock, struct sock *sk);
 
 /*
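sock_init_data() derives the owning uid from the inode of the socket it is handed, which is only safe when that struct socket is embedded in a struct socket_alloc (reached via SOCK_INODE()). The new sock_init_data_uid() variant lets a caller that allocates a bare struct socket supply the uid itself. A hedged sketch of such a caller, with illustrative names:

    /* Hypothetical driver-private socket with no backing inode, so the
     * uid cannot be derived via SOCK_INODE() and is passed explicitly.
     */
    static void example_init_private_sock(struct socket *sock, struct sock *sk)
    {
            sock_init_data_uid(sock, sk, current_fsuid());
    }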
@@ -1936,6 +1956,7 @@
 }
 
 kuid_t sock_i_uid(struct sock *sk);
+unsigned long __sock_i_ino(struct sock *sk);
 unsigned long sock_i_ino(struct sock *sk);
 
 static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
@@ -2264,6 +2285,19 @@
 	return false;
 }
 
+static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk)
+{
+	skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
+	if (skb) {
+		if (sk_rmem_schedule(sk, skb, skb->truesize)) {
+			skb_set_owner_r(skb, sk);
+			return skb;
+		}
+		__kfree_skb(skb);
+	}
+	return NULL;
+}
+
 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
 		    unsigned long expires);
 
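The new helper above bundles a common receive-path sequence: clone the skb, charge its truesize to the destination socket with sk_rmem_schedule(), and only then attach it with skb_set_owner_r(); if either step fails the clone is freed and NULL is returned, giving callers a single success/failure point. A hedged usage sketch (the queueing function is hypothetical):

    /* Hypothetical rx path: keep a per-socket copy of an incoming skb,
     * but only if the socket can absorb the memory charge.
     */
    static int example_queue_copy(struct sock *sk, struct sk_buff *skb)
    {
            struct sk_buff *clone = skb_clone_and_charge_r(skb, sk);

            if (!clone)
                    return -ENOMEM; /* clone failed or receive budget exceeded */
            skb_queue_tail(&sk->sk_receive_queue, clone);
            return 0;
    }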
@@ -2538,7 +2572,7 @@
 		__sock_recv_ts_and_drops(msg, sk, skb);
 	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
 		sock_write_timestamp(sk, skb->tstamp);
-	else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
+	else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP))
 		sock_write_timestamp(sk, 0);
 }
 
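The final hunk replaces a raw load of sk->sk_stamp with sock_read_timestamp(). sk_stamp is a 64-bit ktime_t, so on 32-bit architectures an unannotated read can tear against a concurrent sock_write_timestamp(); the accessor hides whatever protection the architecture needs. A sketch of the shape such an accessor can take, assuming a seqlock (sk_stamp_seq) guards the field on 32-bit builds, as current kernels do:

    /* Illustrative 32-bit-safe read of a 64-bit timestamp. */
    static ktime_t example_read_timestamp(const struct sock *sk)
    {
    #if BITS_PER_LONG == 32
            unsigned int seq;
            ktime_t kt;

            do {
                    seq = read_seqbegin(&sk->sk_stamp_seq);
                    kt = sk->sk_stamp;
            } while (read_seqretry(&sk->sk_stamp_seq, seq));

            return kt;
    #else
            return READ_ONCE(sk->sk_stamp);
    #endif
    }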