2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/net/tls/tls_device.c
@@ -38,6 +38,8 @@
 #include <net/tcp.h>
 #include <net/tls.h>
 
+#include "trace.h"
+
 /* device_offload_lock is used to synchronize tls_dev_add
  * against NETDEV_DOWN notifications.
  */
@@ -48,6 +50,7 @@
 static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
 static LIST_HEAD(tls_device_gc_list);
 static LIST_HEAD(tls_device_list);
+static LIST_HEAD(tls_device_down_list);
 static DEFINE_SPINLOCK(tls_device_lock);
 
 static void tls_device_free_ctx(struct tls_context *ctx)
@@ -61,7 +64,7 @@
 	if (ctx->rx_conf == TLS_HW)
 		kfree(tls_offload_ctx_rx(ctx));
 
-	tls_ctx_free(ctx);
+	tls_ctx_free(NULL, ctx);
 }
 
 static void tls_device_gc_task(struct work_struct *work)
@@ -89,34 +92,21 @@
 	}
 }
 
-static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
-			      struct net_device *netdev)
-{
-	if (sk->sk_destruct != tls_device_sk_destruct) {
-		refcount_set(&ctx->refcount, 1);
-		dev_hold(netdev);
-		ctx->netdev = netdev;
-		spin_lock_irq(&tls_device_lock);
-		list_add_tail(&ctx->list, &tls_device_list);
-		spin_unlock_irq(&tls_device_lock);
-
-		ctx->sk_destruct = sk->sk_destruct;
-		sk->sk_destruct = tls_device_sk_destruct;
-	}
-}
-
 static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&tls_device_lock, flags);
+	if (unlikely(!refcount_dec_and_test(&ctx->refcount)))
+		goto unlock;
+
 	list_move_tail(&ctx->list, &tls_device_gc_list);
 
 	/* schedule_work inside the spinlock
 	 * to make sure tls_device_down waits for that work.
 	 */
 	schedule_work(&tls_device_gc_work);
-
+unlock:
 	spin_unlock_irqrestore(&tls_device_lock, flags);
 }
 
@@ -138,13 +128,10 @@
 
 static void destroy_record(struct tls_record_info *record)
 {
-	int nr_frags = record->num_frags;
-	skb_frag_t *frag;
+	int i;
 
-	while (nr_frags-- > 0) {
-		frag = &record->frags[nr_frags];
-		__skb_frag_unref(frag);
-	}
+	for (i = 0; i < record->num_frags; i++)
+		__skb_frag_unref(&record->frags[i]);
 	kfree(record);
 }
 
@@ -175,12 +162,8 @@
 
 	spin_lock_irqsave(&ctx->lock, flags);
 	info = ctx->retransmit_hint;
-	if (info && !before(acked_seq, info->end_seq)) {
+	if (info && !before(acked_seq, info->end_seq))
 		ctx->retransmit_hint = NULL;
-		list_del(&info->list);
-		destroy_record(info);
-		deleted_records++;
-	}
 
 	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
 		if (before(acked_seq, info->end_seq))
@@ -214,10 +197,53 @@
 		clean_acked_data_disable(inet_csk(sk));
 	}
 
-	if (refcount_dec_and_test(&tls_ctx->refcount))
-		tls_device_queue_ctx_destruction(tls_ctx);
+	tls_device_queue_ctx_destruction(tls_ctx);
 }
-EXPORT_SYMBOL(tls_device_sk_destruct);
+EXPORT_SYMBOL_GPL(tls_device_sk_destruct);
+
+void tls_device_free_resources_tx(struct sock *sk)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+	tls_free_partial_record(sk, tls_ctx);
+}
+
+void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
+	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
+}
+EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);
+
+static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
+				 u32 seq)
+{
+	struct net_device *netdev;
+	struct sk_buff *skb;
+	int err = 0;
+	u8 *rcd_sn;
+
+	skb = tcp_write_queue_tail(sk);
+	if (skb)
+		TCP_SKB_CB(skb)->eor = 1;
+
+	rcd_sn = tls_ctx->tx.rec_seq;
+
+	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
+	down_read(&device_offload_lock);
+	netdev = tls_ctx->netdev;
+	if (netdev)
+		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
+							 rcd_sn,
+							 TLS_OFFLOAD_CTX_DIR_TX);
+	up_read(&device_offload_lock);
+	if (err)
+		return;
+
+	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
+}
 
 static void tls_append_frag(struct tls_record_info *record,
 			    struct page_frag *pfrag,
@@ -226,14 +252,14 @@
 	skb_frag_t *frag;
 
 	frag = &record->frags[record->num_frags - 1];
-	if (frag->page.p == pfrag->page &&
-	    frag->page_offset + frag->size == pfrag->offset) {
-		frag->size += size;
+	if (skb_frag_page(frag) == pfrag->page &&
+	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
+		skb_frag_size_add(frag, size);
 	} else {
 		++frag;
-		frag->page.p = pfrag->page;
-		frag->page_offset = pfrag->offset;
-		frag->size = size;
+		__skb_frag_set_page(frag, pfrag->page);
+		skb_frag_off_set(frag, pfrag->offset);
+		skb_frag_size_set(frag, size);
 		++record->num_frags;
 		get_page(pfrag->page);
 	}
@@ -246,47 +272,66 @@
 			 struct tls_context *ctx,
 			 struct tls_offload_context_tx *offload_ctx,
 			 struct tls_record_info *record,
-			 struct page_frag *pfrag,
-			 int flags,
-			 unsigned char record_type)
+			 int flags)
 {
+	struct tls_prot_info *prot = &ctx->prot_info;
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct page_frag dummy_tag_frag;
 	skb_frag_t *frag;
 	int i;
 
-	/* fill prepend */
-	frag = &record->frags[0];
-	tls_fill_prepend(ctx,
-			 skb_frag_address(frag),
-			 record->len - ctx->tx.prepend_size,
-			 record_type);
-
-	/* HW doesn't care about the data in the tag, because it fills it. */
-	dummy_tag_frag.page = skb_frag_page(frag);
-	dummy_tag_frag.offset = 0;
-
-	tls_append_frag(record, &dummy_tag_frag, ctx->tx.tag_size);
 	record->end_seq = tp->write_seq + record->len;
-	spin_lock_irq(&offload_ctx->lock);
-	list_add_tail(&record->list, &offload_ctx->records_list);
-	spin_unlock_irq(&offload_ctx->lock);
+	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
 	offload_ctx->open_record = NULL;
-	set_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
-	tls_advance_record_sn(sk, &ctx->tx);
+
+	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
+		tls_device_resync_tx(sk, ctx, tp->write_seq);
+
+	tls_advance_record_sn(sk, prot, &ctx->tx);
 
 	for (i = 0; i < record->num_frags; i++) {
 		frag = &record->frags[i];
 		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
 		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
-			    frag->size, frag->page_offset);
-		sk_mem_charge(sk, frag->size);
+			    skb_frag_size(frag), skb_frag_off(frag));
+		sk_mem_charge(sk, skb_frag_size(frag));
 		get_page(skb_frag_page(frag));
 	}
 	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);
 
 	/* all ready, send */
 	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
+}
+
+static int tls_device_record_close(struct sock *sk,
+				   struct tls_context *ctx,
+				   struct tls_record_info *record,
+				   struct page_frag *pfrag,
+				   unsigned char record_type)
+{
+	struct tls_prot_info *prot = &ctx->prot_info;
+	int ret;
+
+	/* append tag
+	 * device will fill in the tag, we just need to append a placeholder
+	 * use socket memory to improve coalescing (re-using a single buffer
+	 * increases frag count)
+	 * if we can't allocate memory now, steal some back from data
+	 */
+	if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
+					sk->sk_allocation))) {
+		ret = 0;
+		tls_append_frag(record, pfrag, prot->tag_size);
+	} else {
+		ret = prot->tag_size;
+		if (record->len <= prot->overhead_size)
+			return -ENOMEM;
+	}
+
+	/* fill prepend */
+	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
+			 record->len - prot->overhead_size,
+			 record_type, prot->version);
+	return ret;
 }
 
 static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
@@ -302,7 +347,7 @@
 
 	frag = &record->frags[0];
 	__skb_frag_set_page(frag, pfrag->page);
-	frag->page_offset = pfrag->offset;
+	skb_frag_off_set(frag, pfrag->offset);
 	skb_frag_size_set(frag, prepend_size);
 
 	get_page(pfrag->page);
@@ -324,7 +369,7 @@
 	if (!offload_ctx->open_record) {
 		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
 						   sk->sk_allocation))) {
-			sk->sk_prot->enter_memory_pressure(sk);
+			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
 			sk_stream_moderate_sndbuf(sk);
 			return -ENOMEM;
 		}
@@ -343,15 +388,41 @@
 	return 0;
 }
 
+static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
+{
+	size_t pre_copy, nocache;
+
+	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
+	if (pre_copy) {
+		pre_copy = min(pre_copy, bytes);
+		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
+			return -EFAULT;
+		bytes -= pre_copy;
+		addr += pre_copy;
+	}
+
+	nocache = round_down(bytes, SMP_CACHE_BYTES);
+	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
+		return -EFAULT;
+	bytes -= nocache;
+	addr += nocache;
+
+	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
+		return -EFAULT;
+
+	return 0;
+}
+
 static int tls_push_data(struct sock *sk,
 			 struct iov_iter *msg_iter,
 			 size_t size, int flags,
 			 unsigned char record_type)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	struct tls_prot_info *prot = &tls_ctx->prot_info;
 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
-	int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
 	struct tls_record_info *record = ctx->open_record;
+	int tls_push_record_flags;
 	struct page_frag *pfrag;
 	size_t orig_size = size;
 	u32 max_open_record_len;
@@ -362,15 +433,20 @@
 
 	if (flags &
 	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
-	if (sk->sk_err)
+	if (unlikely(sk->sk_err))
 		return -sk->sk_err;
 
+	flags |= MSG_SENDPAGE_DECRYPTED;
+	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
+
 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-	rc = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
-	if (rc < 0)
-		return rc;
+	if (tls_is_partially_sent_record(tls_ctx)) {
+		rc = tls_push_partial_record(sk, tls_ctx, flags);
+		if (rc < 0)
+			return rc;
+	}
 
 	pfrag = sk_page_frag(sk);
 
@@ -378,11 +454,10 @@
 	 * we need to leave room for an authentication tag.
 	 */
 	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
-			      tls_ctx->tx.prepend_size;
+			      prot->prepend_size;
 	do {
-		rc = tls_do_allocation(sk, ctx, pfrag,
-				       tls_ctx->tx.prepend_size);
-		if (rc) {
+		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
+		if (unlikely(rc)) {
 			rc = sk_stream_wait_memory(sk, &timeo);
 			if (!rc)
 				continue;
@@ -399,7 +474,7 @@
 			size = orig_size;
 			destroy_record(record);
 			ctx->open_record = NULL;
-		} else if (record->len > tls_ctx->tx.prepend_size) {
+		} else if (record->len > prot->prepend_size) {
 			goto last_record;
 		}
 
@@ -410,13 +485,13 @@
 		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
 		copy = min_t(size_t, copy, (max_open_record_len - record->len));
 
-		if (copy_from_iter_nocache(page_address(pfrag->page) +
-					   pfrag->offset,
-					   copy, msg_iter) != copy) {
-			rc = -EFAULT;
-			goto handle_error;
+		if (copy) {
+			rc = tls_device_copy_data(page_address(pfrag->page) +
+						  pfrag->offset, copy, msg_iter);
+			if (rc)
+				goto handle_error;
+			tls_append_frag(record, pfrag, copy);
 		}
-		tls_append_frag(record, pfrag, copy);
 
 		size -= copy;
 		if (!size) {
@@ -432,13 +507,24 @@
 
 		if (done || record->len >= max_open_record_len ||
 		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
+			rc = tls_device_record_close(sk, tls_ctx, record,
+						     pfrag, record_type);
+			if (rc) {
+				if (rc > 0) {
+					size += rc;
+				} else {
+					size = orig_size;
+					destroy_record(record);
+					ctx->open_record = NULL;
+					break;
+				}
+			}
+
 			rc = tls_push_record(sk,
 					     tls_ctx,
 					     ctx,
 					     record,
-					     pfrag,
-					     tls_push_record_flags,
-					     record_type);
+					     tls_push_record_flags);
 			if (rc < 0)
 				break;
 		}
@@ -455,8 +541,10 @@
 int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 {
 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	int rc;
 
+	mutex_lock(&tls_ctx->tx_lock);
 	lock_sock(sk);
 
 	if (unlikely(msg->msg_controllen)) {
@@ -470,12 +558,14 @@
 
 out:
 	release_sock(sk);
+	mutex_unlock(&tls_ctx->tx_lock);
 	return rc;
 }
 
 int tls_device_sendpage(struct sock *sk, struct page *page,
 			int offset, size_t size, int flags)
 {
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct iov_iter msg_iter;
 	char *kaddr;
 	struct kvec iov;
@@ -484,23 +574,25 @@
 	if (flags & MSG_SENDPAGE_NOTLAST)
 		flags |= MSG_MORE;
 
+	mutex_lock(&tls_ctx->tx_lock);
 	lock_sock(sk);
 
 	if (flags & MSG_OOB) {
-		rc = -ENOTSUPP;
+		rc = -EOPNOTSUPP;
 		goto out;
 	}
 
 	kaddr = kmap(page);
 	iov.iov_base = kaddr + offset;
 	iov.iov_len = size;
-	iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
+	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
 	rc = tls_push_data(sk, &msg_iter, size,
 			   flags, TLS_RECORD_TYPE_DATA);
 	kunmap(page);
 
 out:
 	release_sock(sk);
+	mutex_unlock(&tls_ctx->tx_lock);
 	return rc;
 }
 
@@ -516,9 +608,10 @@
 	/* if retransmit_hint is irrelevant start
 	 * from the beggining of the list
 	 */
-	info = list_first_entry(&context->records_list,
-				struct tls_record_info, list);
-
+	info = list_first_entry_or_null(&context->records_list,
+					struct tls_record_info, list);
+	if (!info)
+		return NULL;
 	/* send the start_marker record if seq number is before the
 	 * tls offload start marker sequence number. This record is
 	 * required to handle TCP packets which are before TLS offload
@@ -540,7 +633,9 @@
 		record_sn = context->unacked_record_sn;
 	}
 
-	list_for_each_entry_from(info, &context->records_list, list) {
+	/* We just need the _rcu for the READ_ONCE() */
+	rcu_read_lock();
+	list_for_each_entry_from_rcu(info, &context->records_list, list) {
 		if (before(seq, info->end_seq)) {
 			if (!context->retransmit_hint ||
 			    after(info->end_seq,
@@ -549,12 +644,15 @@
 				  context->retransmit_hint->end_seq))
 				context->retransmit_hint = info;
 			*p_record_sn = record_sn;
-			return info;
+			goto exit_rcu_unlock;
 		}
 		record_sn++;
 	}
+	info = NULL;
 
-	return NULL;
+exit_rcu_unlock:
+	rcu_read_unlock();
+	return info;
 }
 EXPORT_SYMBOL(tls_get_record);
 
@@ -562,43 +660,206 @@
 {
 	struct iov_iter msg_iter;
 
-	iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
+	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
 	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
 }
 
-static void tls_device_resync_rx(struct tls_context *tls_ctx,
-				 struct sock *sk, u32 seq, u64 rcd_sn)
+void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
 {
-	struct net_device *netdev;
+	if (tls_is_partially_sent_record(ctx)) {
+		gfp_t sk_allocation = sk->sk_allocation;
 
-	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
-		return;
-	netdev = READ_ONCE(tls_ctx->netdev);
-	if (netdev)
-		netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
-	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+		WARN_ON_ONCE(sk->sk_write_pending);
+
+		sk->sk_allocation = GFP_ATOMIC;
+		tls_push_partial_record(sk, ctx,
+					MSG_DONTWAIT | MSG_NOSIGNAL |
+					MSG_SENDPAGE_DECRYPTED);
+		sk->sk_allocation = sk_allocation;
+	}
 }
 
-void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
+static void tls_device_resync_rx(struct tls_context *tls_ctx,
+				 struct sock *sk, u32 seq, u8 *rcd_sn)
+{
+	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+	struct net_device *netdev;
+
+	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
+	rcu_read_lock();
+	netdev = READ_ONCE(tls_ctx->netdev);
+	if (netdev)
+		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
+						   TLS_OFFLOAD_CTX_DIR_RX);
+	rcu_read_unlock();
+	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
+}
+
+static bool
+tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
+			   s64 resync_req, u32 *seq, u16 *rcd_delta)
+{
+	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
+	u32 req_seq = resync_req >> 32;
+	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
+	u16 i;
+
+	*rcd_delta = 0;
+
+	if (is_async) {
+		/* shouldn't get to wraparound:
+		 * too long in async stage, something bad happened
+		 */
+		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
+			return false;
+
+		/* asynchronous stage: log all headers seq such that
+		 * req_seq <= seq <= end_seq, and wait for real resync request
+		 */
+		if (before(*seq, req_seq))
+			return false;
+		if (!after(*seq, req_end) &&
+		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
+			resync_async->log[resync_async->loglen++] = *seq;
+
+		resync_async->rcd_delta++;
+
+		return false;
+	}
+
+	/* synchronous stage: check against the logged entries and
+	 * proceed to check the next entries if no match was found
+	 */
+	for (i = 0; i < resync_async->loglen; i++)
+		if (req_seq == resync_async->log[i] &&
+		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
+			*rcd_delta = resync_async->rcd_delta - i;
+			*seq = req_seq;
+			resync_async->loglen = 0;
+			resync_async->rcd_delta = 0;
+			return true;
+		}
+
+	resync_async->loglen = 0;
+	resync_async->rcd_delta = 0;
+
+	if (req_seq == *seq &&
+	    atomic64_try_cmpxchg(&resync_async->req,
+				 &resync_req, 0))
+		return true;
+
+	return false;
+}
+
+void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_rx *rx_ctx;
-	u32 is_req_pending;
+	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+	u32 sock_data, is_req_pending;
+	struct tls_prot_info *prot;
 	s64 resync_req;
+	u16 rcd_delta;
 	u32 req_seq;
 
 	if (tls_ctx->rx_conf != TLS_HW)
 		return;
+	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
+		return;
 
+	prot = &tls_ctx->prot_info;
 	rx_ctx = tls_offload_ctx_rx(tls_ctx);
-	resync_req = atomic64_read(&rx_ctx->resync_req);
-	req_seq = ntohl(resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
-	is_req_pending = resync_req;
+	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
 
-	if (unlikely(is_req_pending) && req_seq == seq &&
-	    atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
+	switch (rx_ctx->resync_type) {
+	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
+		resync_req = atomic64_read(&rx_ctx->resync_req);
+		req_seq = resync_req >> 32;
 		seq += TLS_HEADER_SIZE - 1;
-		tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
+		is_req_pending = resync_req;
+
+		if (likely(!is_req_pending) || req_seq != seq ||
+		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
+			return;
+		break;
+	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
+		if (likely(!rx_ctx->resync_nh_do_now))
+			return;
+
+		/* head of next rec is already in, note that the sock_inq will
+		 * include the currently parsed message when called from parser
+		 */
+		sock_data = tcp_inq(sk);
+		if (sock_data > rcd_len) {
+			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
+							    rcd_len);
+			return;
+		}
+
+		rx_ctx->resync_nh_do_now = 0;
+		seq += rcd_len;
+		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
+		break;
+	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
+		resync_req = atomic64_read(&rx_ctx->resync_async->req);
+		is_req_pending = resync_req;
+		if (likely(!is_req_pending))
+			return;
+
+		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
+						resync_req, &seq, &rcd_delta))
+			return;
+		tls_bigint_subtract(rcd_sn, rcd_delta);
+		break;
+	}
+
+	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
+}
+
+static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
+					   struct tls_offload_context_rx *ctx,
+					   struct sock *sk, struct sk_buff *skb)
+{
+	struct strp_msg *rxm;
+
+	/* device will request resyncs by itself based on stream scan */
+	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
+		return;
+	/* already scheduled */
+	if (ctx->resync_nh_do_now)
+		return;
+	/* seen decrypted fragments since last fully-failed record */
+	if (ctx->resync_nh_reset) {
+		ctx->resync_nh_reset = 0;
+		ctx->resync_nh.decrypted_failed = 1;
+		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
+		return;
+	}
+
+	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
+		return;
+
+	/* doing resync, bump the next target in case it fails */
+	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
+		ctx->resync_nh.decrypted_tgt *= 2;
+	else
+		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;
+
+	rxm = strp_msg(skb);
+
+	/* head of next rec is already in, parser will sync for us */
+	if (tcp_inq(sk) > rxm->full_len) {
+		trace_tls_device_rx_resync_nh_schedule(sk);
+		ctx->resync_nh_do_now = 1;
+	} else {
+		struct tls_prot_info *prot = &tls_ctx->prot_info;
+		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+
+		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
+		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
+
+		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
+				     rcd_sn);
 	}
 }
 
@@ -626,8 +887,10 @@
 	sg_set_buf(&sg[0], buf,
 		   rxm->full_len + TLS_HEADER_SIZE +
 		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
-	skb_copy_bits(skb, offset, buf,
-		      TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
+	err = skb_copy_bits(skb, offset, buf,
+			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
+	if (err)
+		goto free_buf;
 
 	/* We are interested only in the decrypted data not the auth */
 	err = decrypt_skb(sk, skb, sg);
@@ -641,8 +904,11 @@
 	if (skb_pagelen(skb) > offset) {
 		copy = min_t(int, skb_pagelen(skb) - offset, data_len);
 
-		if (skb->decrypted)
-			skb_store_bits(skb, offset, buf, copy);
+		if (skb->decrypted) {
+			err = skb_store_bits(skb, offset, buf, copy);
+			if (err)
+				goto free_buf;
+		}
 
 		offset += copy;
 		buf += copy;
@@ -665,8 +931,11 @@
 		copy = min_t(int, skb_iter->len - frag_pos,
 			     data_len + rxm->offset - offset);
 
-		if (skb_iter->decrypted)
-			skb_store_bits(skb_iter, frag_pos, buf, copy);
+		if (skb_iter->decrypted) {
+			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
+			if (err)
+				goto free_buf;
+		}
 
 		offset += copy;
 		buf += copy;
@@ -679,17 +948,13 @@
 	return err;
 }
 
-int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
+int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
+			 struct sk_buff *skb, struct strp_msg *rxm)
 {
-	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
 	int is_decrypted = skb->decrypted;
 	int is_encrypted = !is_decrypted;
 	struct sk_buff *skb_iter;
-
-	/* Skip if it is already decrypted */
-	if (ctx->sw.decrypted)
-		return 0;
 
 	/* Check if all the data is decrypted already */
 	skb_walk_frags(skb, skb_iter) {
@@ -697,41 +962,79 @@
 		is_decrypted &= skb_iter->decrypted;
 		is_encrypted &= !skb_iter->decrypted;
 	}
 
+	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
+				   tls_ctx->rx.rec_seq, rxm->full_len,
+				   is_encrypted, is_decrypted);
+
 	ctx->sw.decrypted |= is_decrypted;
 
-	/* Return immedeatly if the record is either entirely plaintext or
+	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
+		if (likely(is_encrypted || is_decrypted))
+			return 0;
+
+		/* After tls_device_down disables the offload, the next SKB will
+		 * likely have initial fragments decrypted, and final ones not
+		 * decrypted. We need to reencrypt that single SKB.
+		 */
+		return tls_device_reencrypt(sk, skb);
+	}
+
+	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise handle reencrypt partially decrypted
	 * record.
	 */
-	return (is_encrypted || is_decrypted) ? 0 :
-		tls_device_reencrypt(sk, skb);
+	if (is_decrypted) {
+		ctx->resync_nh_reset = 1;
+		return 0;
+	}
+	if (is_encrypted) {
+		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
+		return 0;
+	}
+
+	ctx->resync_nh_reset = 1;
+	return tls_device_reencrypt(sk, skb);
+}
+
+static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
+			      struct net_device *netdev)
+{
+	if (sk->sk_destruct != tls_device_sk_destruct) {
+		refcount_set(&ctx->refcount, 1);
+		dev_hold(netdev);
+		ctx->netdev = netdev;
+		spin_lock_irq(&tls_device_lock);
+		list_add_tail(&ctx->list, &tls_device_list);
+		spin_unlock_irq(&tls_device_lock);
+
+		ctx->sk_destruct = sk->sk_destruct;
+		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
+	}
 }
 
 int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 {
 	u16 nonce_size, tag_size, iv_size, rec_seq_size;
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	struct tls_prot_info *prot = &tls_ctx->prot_info;
 	struct tls_record_info *start_marker_record;
 	struct tls_offload_context_tx *offload_ctx;
 	struct tls_crypto_info *crypto_info;
 	struct net_device *netdev;
 	char *iv, *rec_seq;
 	struct sk_buff *skb;
-	int rc = -EINVAL;
 	__be64 rcd_sn;
+	int rc;
 
 	if (!ctx)
-		goto out;
+		return -EINVAL;
 
-	if (ctx->priv_ctx_tx) {
-		rc = -EEXIST;
-		goto out;
-	}
+	if (ctx->priv_ctx_tx)
+		return -EEXIST;
 
 	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
-	if (!start_marker_record) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!start_marker_record)
		return -ENOMEM;
 
 	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
 	if (!offload_ctx) {
@@ -740,6 +1043,11 @@
 	}
 
 	crypto_info = &ctx->crypto_send.info;
+	if (crypto_info->version != TLS_1_2_VERSION) {
+		rc = -EOPNOTSUPP;
+		goto free_offload_ctx;
+	}
+
 	switch (crypto_info->cipher_type) {
 	case TLS_CIPHER_AES_GCM_128:
 		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
@@ -755,10 +1063,18 @@
 		goto free_offload_ctx;
 	}
 
-	ctx->tx.prepend_size = TLS_HEADER_SIZE + nonce_size;
-	ctx->tx.tag_size = tag_size;
-	ctx->tx.overhead_size = ctx->tx.prepend_size + ctx->tx.tag_size;
-	ctx->tx.iv_size = iv_size;
+	/* Sanity-check the rec_seq_size for stack allocations */
+	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
+		rc = -EINVAL;
+		goto free_offload_ctx;
+	}
+
+	prot->version = crypto_info->version;
+	prot->cipher_type = crypto_info->cipher_type;
+	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
+	prot->tag_size = tag_size;
+	prot->overhead_size = prot->prepend_size + prot->tag_size;
+	prot->iv_size = iv_size;
 	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
 			     GFP_KERNEL);
 	if (!ctx->tx.iv) {
@@ -768,7 +1084,7 @@
 
 	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
 
-	ctx->tx.rec_seq_size = rec_seq_size;
+	prot->rec_seq_size = rec_seq_size;
 	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
 	if (!ctx->tx.rec_seq) {
 		rc = -ENOMEM;
@@ -804,41 +1120,43 @@
 	if (skb)
 		TCP_SKB_CB(skb)->eor = 1;
 
-	/* We support starting offload on multiple sockets
-	 * concurrently, so we only need a read lock here.
-	 * This lock must precede get_netdev_for_sock to prevent races between
-	 * NETDEV_DOWN and setsockopt.
-	 */
-	down_read(&device_offload_lock);
 	netdev = get_netdev_for_sock(sk);
 	if (!netdev) {
 		pr_err_ratelimited("%s: netdev not found\n", __func__);
 		rc = -EINVAL;
-		goto release_lock;
+		goto disable_cad;
 	}
 
 	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
-		rc = -ENOTSUPP;
+		rc = -EOPNOTSUPP;
 		goto release_netdev;
 	}
 
 	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
+	 *
+	 * device_offload_lock is taken in tls_devices's NETDEV_DOWN
+	 * handler thus protecting from the device going down before
+	 * ctx was added to tls_device_list.
	 */
+	down_read(&device_offload_lock);
 	if (!(netdev->flags & IFF_UP)) {
 		rc = -EINVAL;
-		goto release_netdev;
+		goto release_lock;
 	}
 
 	ctx->priv_ctx_tx = offload_ctx;
 	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
 					     &ctx->crypto_send.info,
 					     tcp_sk(sk)->write_seq);
+	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
+				     tcp_sk(sk)->write_seq, rec_seq, rc);
 	if (rc)
-		goto release_netdev;
+		goto release_lock;
 
 	tls_device_attach(ctx, sk, netdev);
+	up_read(&device_offload_lock);
 
 	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
@@ -846,13 +1164,14 @@
	 */
 	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
 	dev_put(netdev);
-	up_read(&device_offload_lock);
-	goto out;
 
-release_netdev:
-	dev_put(netdev);
+	return 0;
+
 release_lock:
 	up_read(&device_offload_lock);
+release_netdev:
+	dev_put(netdev);
+disable_cad:
 	clean_acked_data_disable(inet_csk(sk));
 	crypto_free_aead(offload_ctx->aead_send);
 free_rec_seq:
@@ -864,50 +1183,50 @@
 	ctx->priv_ctx_tx = NULL;
 free_marker_record:
 	kfree(start_marker_record);
-out:
 	return rc;
 }
 
 int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
 {
+	struct tls12_crypto_info_aes_gcm_128 *info;
 	struct tls_offload_context_rx *context;
 	struct net_device *netdev;
 	int rc = 0;
 
-	/* We support starting offload on multiple sockets
-	 * concurrently, so we only need a read lock here.
-	 * This lock must precede get_netdev_for_sock to prevent races between
-	 * NETDEV_DOWN and setsockopt.
-	 */
-	down_read(&device_offload_lock);
+	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
+		return -EOPNOTSUPP;
+
 	netdev = get_netdev_for_sock(sk);
 	if (!netdev) {
 		pr_err_ratelimited("%s: netdev not found\n", __func__);
-		rc = -EINVAL;
-		goto release_lock;
+		return -EINVAL;
 	}
 
 	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
-		pr_err_ratelimited("%s: netdev %s with no TLS offload\n",
-				   __func__, netdev->name);
-		rc = -ENOTSUPP;
+		rc = -EOPNOTSUPP;
 		goto release_netdev;
 	}
 
 	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
+	 *
+	 * device_offload_lock is taken in tls_devices's NETDEV_DOWN
+	 * handler thus protecting from the device going down before
+	 * ctx was added to tls_device_list.
	 */
+	down_read(&device_offload_lock);
 	if (!(netdev->flags & IFF_UP)) {
 		rc = -EINVAL;
-		goto release_netdev;
+		goto release_lock;
 	}
 
 	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
 	if (!context) {
 		rc = -ENOMEM;
-		goto release_netdev;
+		goto release_lock;
 	}
+	context->resync_nh_reset = 1;
 
 	ctx->priv_ctx_rx = context;
 	rc = tls_set_sw_offload(sk, ctx, 0);
@@ -917,14 +1236,18 @@
 	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
 					     &ctx->crypto_recv.info,
 					     tcp_sk(sk)->copied_seq);
-	if (rc) {
-		pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
-				   __func__);
+	info = (void *)&ctx->crypto_recv.info;
+	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
+				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
+	if (rc)
 		goto free_sw_resources;
-	}
 
 	tls_device_attach(ctx, sk, netdev);
-	goto release_netdev;
+	up_read(&device_offload_lock);
+
+	dev_put(netdev);
+
+	return 0;
 
 free_sw_resources:
 	up_read(&device_offload_lock);
@@ -932,10 +1255,10 @@
 	down_read(&device_offload_lock);
 release_ctx:
 	ctx->priv_ctx_rx = NULL;
-release_netdev:
-	dev_put(netdev);
 release_lock:
 	up_read(&device_offload_lock);
+release_netdev:
+	dev_put(netdev);
 	return rc;
 }
 
@@ -983,6 +1306,26 @@
 	spin_unlock_irqrestore(&tls_device_lock, flags);
 
 	list_for_each_entry_safe(ctx, tmp, &list, list) {
+		/* Stop offloaded TX and switch to the fallback.
+		 * tls_is_sk_tx_device_offloaded will return false.
+		 */
+		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
+
+		/* Stop the RX and TX resync.
+		 * tls_dev_resync must not be called after tls_dev_del.
+		 */
+		WRITE_ONCE(ctx->netdev, NULL);
+
+		/* Start skipping the RX resync logic completely. */
+		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
+
+		/* Sync with inflight packets. After this point:
+		 * TX: no non-encrypted packets will be passed to the driver.
+		 * RX: resync requests from the driver will be ignored.
+		 */
+		synchronize_net();
+
+		/* Release the offload context on the driver side. */
 		if (ctx->tx_conf == TLS_HW)
 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
 							TLS_OFFLOAD_CTX_DIR_TX);
@@ -990,15 +1333,29 @@
 		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
 							TLS_OFFLOAD_CTX_DIR_RX);
-		WRITE_ONCE(ctx->netdev, NULL);
-		smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
-		while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
-			usleep_range(10, 200);
-		dev_put(netdev);
-		list_del_init(&ctx->list);
 
-		if (refcount_dec_and_test(&ctx->refcount))
+		dev_put(netdev);
+
+		/* Move the context to a separate list for two reasons:
+		 * 1. When the context is deallocated, list_del is called.
+		 * 2. It's no longer an offloaded context, so we don't want to
+		 *    run offload-specific code on this context.
+		 */
+		spin_lock_irqsave(&tls_device_lock, flags);
+		list_move_tail(&ctx->list, &tls_device_down_list);
+		spin_unlock_irqrestore(&tls_device_lock, flags);
+
+		/* Device contexts for RX and TX will be freed in on sk_destruct
+		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
+		 * Now release the ref taken above.
+		 */
+		if (refcount_dec_and_test(&ctx->refcount)) {
+			/* sk_destruct ran after tls_device_down took a ref, and
+			 * it returned early. Complete the destruction here.
+			 */
+			list_del(&ctx->list);
 			tls_device_free_ctx(ctx);
+		}
 	}
 
 	up_write(&device_offload_lock);
@@ -1021,7 +1378,7 @@
 	case NETDEV_REGISTER:
 	case NETDEV_FEAT_CHANGE:
 		if ((dev->features & NETIF_F_HW_TLS_RX) &&
-		    !dev->tlsdev_ops->tls_dev_resync_rx)
+		    !dev->tlsdev_ops->tls_dev_resync)
 			return NOTIFY_BAD;
 
 		if (dev->tlsdev_ops &&
@@ -1040,13 +1397,14 @@
 	.notifier_call = tls_dev_event,
 };
 
-void __init tls_device_init(void)
+int __init tls_device_init(void)
 {
-	register_netdevice_notifier(&tls_dev_notifier);
+	return register_netdevice_notifier(&tls_dev_notifier);
 }
 
 void __exit tls_device_cleanup(void)
 {
 	unregister_netdevice_notifier(&tls_dev_notifier);
 	flush_work(&tls_device_gc_work);
+	clean_acked_data_flush();
 }