```diff
@@ -810,7 +810,7 @@
         psock = sk_psock_get(sk);
         if (!psock || !policy) {
                 err = tls_push_record(sk, flags, record_type);
-                if (err && sk->sk_err == EBADMSG) {
+                if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
                         *copied -= sk_msg_free(sk, msg);
                         tls_free_open_rec(sk);
                         err = -sk->sk_err;
@@ -839,7 +839,7 @@
         switch (psock->eval) {
         case __SK_PASS:
                 err = tls_push_record(sk, flags, record_type);
-                if (err && sk->sk_err == EBADMSG) {
+                if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
                         *copied -= sk_msg_free(sk, msg);
                         tls_free_open_rec(sk);
                         err = -sk->sk_err;
```
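In both hunks the teardown path now runs only on a real failure: with async crypto, `tls_push_record()` can return `-EINPROGRESS` to mean the record was queued and the completion callback will finish it, so freeing the open record on that path would touch memory still in flight. A minimal userspace sketch of that distinction (not kernel code; `push_record()` and `send_one()` are hypothetical stand-ins):

```c
/* Minimal userspace sketch, NOT kernel code: models why the hunks above
 * exclude -EINPROGRESS. For async crypto, a push can return -EINPROGRESS
 * to mean "queued, the completion callback will finish it"; freeing the
 * open record on that path would be a use-after-free.
 */
#include <errno.h>
#include <stdio.h>

static int push_record(int outcome)
{
        return outcome; /* demo hook: caller chooses the result */
}

static int send_one(int outcome)
{
        int err = push_record(outcome);

        if (err && err != -EINPROGRESS) {
                /* real failure: safe to tear down the open record */
                printf("error %d: free open record\n", err);
                return err;
        }
        if (err == -EINPROGRESS)
                printf("async: record still in flight, leave it alone\n");
        return 0;
}

int main(void)
{
        send_one(0);            /* synchronous success */
        send_one(-EINPROGRESS); /* queued to async engine */
        send_one(-EBADMSG);     /* genuine failure */
        return 0;
}
```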
```diff
@@ -949,7 +949,9 @@
                                MSG_CMSG_COMPAT))
                 return -EOPNOTSUPP;
 
-        mutex_lock(&tls_ctx->tx_lock);
+        ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
+        if (ret)
+                return ret;
         lock_sock(sk);
 
         if (unlikely(msg->msg_controllen)) {
@@ -1283,7 +1285,9 @@
                       MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
                 return -EOPNOTSUPP;
 
-        mutex_lock(&tls_ctx->tx_lock);
+        ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
+        if (ret)
+                return ret;
         lock_sock(sk);
         ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
         release_sock(sk);
```
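`mutex_lock_interruptible()` returns 0 on success or `-EINTR` if a signal arrives while waiting, so a sender blocked behind a long-held `tx_lock` can be interrupted instead of sleeping uninterruptibly. A rough userspace analogue of that shape, assuming a binary semaphore in place of the kernel mutex, since POSIX mutexes have no interruptible lock:

```c
/* Userspace analogue, NOT the kernel API: a binary semaphore stands in
 * for tx_lock; sem_wait() fails with EINTR when a signal interrupts the
 * wait, mirroring mutex_lock_interruptible() returning -EINTR.
 * do_sendmsg() is a hypothetical stand-in for the send path.
 */
#include <errno.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t tx_lock;

static int do_sendmsg(void)
{
        /* analogue of: ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
         * if (ret) return ret;
         */
        if (sem_wait(&tx_lock) != 0)
                return -errno; /* -EINTR on signal: fail, don't hang */

        /* ... critical section: append and push records ... */

        sem_post(&tx_lock);
        return 0;
}

int main(void)
{
        sem_init(&tx_lock, 0, 1); /* binary semaphore acting as a mutex */
        printf("do_sendmsg() -> %d\n", do_sendmsg());
        sem_destroy(&tx_lock);
        return 0;
}
```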
```diff
@@ -2266,11 +2270,19 @@
 
         if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
                 return;
-        mutex_lock(&tls_ctx->tx_lock);
-        lock_sock(sk);
-        tls_tx_records(sk, -1);
-        release_sock(sk);
-        mutex_unlock(&tls_ctx->tx_lock);
+
+        if (mutex_trylock(&tls_ctx->tx_lock)) {
+                lock_sock(sk);
+                tls_tx_records(sk, -1);
+                release_sock(sk);
+                mutex_unlock(&tls_ctx->tx_lock);
+        } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+                /* Someone is holding the tx_lock, they will likely run Tx
+                 * and cancel the work on their way out of the lock section.
+                 * Schedule a long delay just in case.
+                 */
+                schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
+        }
 }
 
 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
```
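After this hunk the work handler never sleeps on `tx_lock`: it either takes the lock immediately and flushes, or re-arms the scheduled bit and defers to whoever holds the lock, with the delayed retry as a safety net (per the comment in the hunk). A compilable userspace analogue of that trylock-or-rearm shape, assuming C11 atomics in place of the kernel bit ops and a stub in place of `schedule_delayed_work()`:

```c
/* Userspace analogue, NOT the kernel API: atomic_exchange() on a bool
 * mirrors test_and_clear_bit()/test_and_set_bit() on BIT_TX_SCHEDULED,
 * pthread_mutex_trylock() mirrors mutex_trylock(), and
 * reschedule_tx_work() is a stub for schedule_delayed_work().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool tx_scheduled;

static void reschedule_tx_work(void)
{
        printf("lock busy: re-armed for a delayed retry\n");
}

static void tx_work_handler(void)
{
        /* analogue of: if (!test_and_clear_bit(BIT_TX_SCHEDULED, ...)) return; */
        if (!atomic_exchange(&tx_scheduled, false))
                return;

        if (pthread_mutex_trylock(&tx_lock) == 0) {
                /* ... flush pending records (tls_tx_records analogue) ... */
                printf("lock free: flushed pending records\n");
                pthread_mutex_unlock(&tx_lock);
        } else if (!atomic_exchange(&tx_scheduled, true)) {
                /* The lock holder will likely flush Tx and cancel this
                 * work on its way out; the delayed retry is a safety net.
                 */
                reschedule_tx_work();
        }
}

int main(void)
{
        atomic_exchange(&tx_scheduled, true); /* pretend work was queued */
        tx_work_handler();
        return 0;
}
```

The design point is the same in both settings: a worker that may run while another task owns the lock must not block on it, or it can wedge the workqueue; trying the lock and re-arming on failure keeps the handler non-blocking while still guaranteeing the queued records eventually go out.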