.. | .. |
---|
43 | 43 | #include "discover.h" |
---|
44 | 44 | #include "netlink.h" |
---|
45 | 45 | #include "monitor.h" |
---|
| 46 | +#include "trace.h" |
---|
| 47 | +#include "crypto.h" |
---|
46 | 48 | |
---|
47 | 49 | #include <linux/pkt_sched.h> |
---|
48 | 50 | |
---|
.. | .. |
---|
105 | 107 | * @transmitq: queue for sent, non-acked messages |
---|
106 | 108 | * @backlogq: queue for messages waiting to be sent |
---|
107 | 109 | * @snt_nxt: next sequence number to use for outbound messages |
---|
108 | | - * @last_retransmitted: sequence number of most recently retransmitted message |
---|
109 | | - * @stale_cnt: counter for number of identical retransmit attempts |
---|
110 | | - * @stale_limit: time when repeated identical retransmits must force link reset |
---|
111 | 110 | * @ackers: # of peers that needs to ack each packet before it can be released |
---|
112 | 111 | * @acked: # last packet acked by a certain peer. Used for broadcast. |
---|
113 | 112 | * @rcv_nxt: next sequence number to expect for inbound messages |
---|
.. | .. |
---|
150 | 149 | /* Failover/synch */ |
---|
151 | 150 | u16 drop_point; |
---|
152 | 151 | struct sk_buff *failover_reasm_skb; |
---|
| 152 | + struct sk_buff_head failover_deferdq; |
---|
153 | 153 | |
---|
154 | 154 | /* Max packet negotiation */ |
---|
155 | 155 | u16 mtu; |
---|
.. | .. |
---|
164 | 164 | struct sk_buff *target_bskb; |
---|
165 | 165 | } backlog[5]; |
---|
166 | 166 | u16 snd_nxt; |
---|
167 | | - u16 last_retransm; |
---|
168 | | - u16 window; |
---|
169 | | - u16 stale_cnt; |
---|
170 | | - unsigned long stale_limit; |
---|
171 | 167 | |
---|
172 | 168 | /* Reception */ |
---|
173 | 169 | u16 rcv_nxt; |
---|
.. | .. |
---|
178 | 174 | |
---|
179 | 175 | /* Congestion handling */ |
---|
180 | 176 | struct sk_buff_head wakeupq; |
---|
| 177 | + u16 window; |
---|
| 178 | + u16 min_win; |
---|
| 179 | + u16 ssthresh; |
---|
| 180 | + u16 max_win; |
---|
| 181 | + u16 cong_acks; |
---|
| 182 | + u16 checkpoint; |
---|
181 | 183 | |
---|
182 | 184 | /* Fragmentation/reassembly */ |
---|
183 | 185 | struct sk_buff *reasm_buf; |
---|
| 186 | + struct sk_buff *reasm_tnlmsg; |
---|
184 | 187 | |
---|
185 | 188 | /* Broadcast */ |
---|
186 | 189 | u16 ackers; |
---|
187 | 190 | u16 acked; |
---|
| 191 | + u16 last_gap; |
---|
| 192 | + struct tipc_gap_ack_blks *last_ga; |
---|
188 | 193 | struct tipc_link *bc_rcvlink; |
---|
189 | 194 | struct tipc_link *bc_sndlink; |
---|
190 | | - unsigned long prev_retr; |
---|
191 | | - u16 prev_from; |
---|
192 | | - u16 prev_to; |
---|
193 | 195 | u8 nack_state; |
---|
194 | 196 | bool bc_peer_is_up; |
---|
195 | 197 | |
---|
.. | .. |
---|
211 | 213 | BC_NACK_SND_SUPPRESS, |
---|
212 | 214 | }; |
---|
213 | 215 | |
---|
214 | | -#define TIPC_BC_RETR_LIMIT 10 /* [ms] */ |
---|
215 | | - |
---|
216 | | -/* |
---|
217 | | - * Interval between NACKs when packets arrive out of order |
---|
218 | | - */ |
---|
219 | | -#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2) |
---|
| 216 | +#define TIPC_BC_RETR_LIM (jiffies + msecs_to_jiffies(10)) |
---|
| 217 | +#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1)) |
---|
220 | 218 | |
---|
221 | 219 | /* Link FSM states: |
---|
222 | 220 | */ |
---|
.. | .. |
---|
248 | 246 | struct sk_buff_head *xmitq); |
---|
249 | 247 | static void tipc_link_build_bc_init_msg(struct tipc_link *l, |
---|
250 | 248 | struct sk_buff_head *xmitq); |
---|
251 | | -static bool tipc_link_release_pkts(struct tipc_link *l, u16 to); |
---|
252 | | - |
---|
| 249 | +static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga, |
---|
| 250 | + struct tipc_link *l, u8 start_index); |
---|
| 251 | +static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr); |
---|
| 252 | +static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r, |
---|
| 253 | + u16 acked, u16 gap, |
---|
| 254 | + struct tipc_gap_ack_blks *ga, |
---|
| 255 | + struct sk_buff_head *xmitq, |
---|
| 256 | + bool *retransmitted, int *rc); |
---|
| 257 | +static void tipc_link_update_cwin(struct tipc_link *l, int released, |
---|
| 258 | + bool retransmitted); |
---|
253 | 259 | /* |
---|
254 | 260 | * Simple non-static link routines (i.e. referenced outside this file) |
---|
255 | 261 | */ |
---|
.. | .. |
---|
308 | 314 | return l->peer_bearer_id << 16 | l->bearer_id; |
---|
309 | 315 | } |
---|
310 | 316 | |
---|
311 | | -int tipc_link_window(struct tipc_link *l) |
---|
| 317 | +int tipc_link_min_win(struct tipc_link *l) |
---|
312 | 318 | { |
---|
313 | | - return l->window; |
---|
| 319 | + return l->min_win; |
---|
| 320 | +} |
---|
| 321 | + |
---|
| 322 | +int tipc_link_max_win(struct tipc_link *l) |
---|
| 323 | +{ |
---|
| 324 | + return l->max_win; |
---|
314 | 325 | } |
---|
315 | 326 | |
---|
316 | 327 | int tipc_link_prio(struct tipc_link *l) |
---|
.. | .. |
---|
331 | 342 | char tipc_link_plane(struct tipc_link *l) |
---|
332 | 343 | { |
---|
333 | 344 | return l->net_plane; |
---|
| 345 | +} |
---|
| 346 | + |
---|
| 347 | +struct net *tipc_link_net(struct tipc_link *l) |
---|
| 348 | +{ |
---|
| 349 | + return l->net; |
---|
334 | 350 | } |
---|
335 | 351 | |
---|
336 | 352 | void tipc_link_update_caps(struct tipc_link *l, u16 capabilities) |
---|
.. | .. |
---|
359 | 375 | snd_l->ackers--; |
---|
360 | 376 | rcv_l->bc_peer_is_up = true; |
---|
361 | 377 | rcv_l->state = LINK_ESTABLISHED; |
---|
362 | | - tipc_link_bc_ack_rcv(rcv_l, ack, xmitq); |
---|
| 378 | + tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL); |
---|
| 379 | + trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!"); |
---|
363 | 380 | tipc_link_reset(rcv_l); |
---|
364 | 381 | rcv_l->state = LINK_RESET; |
---|
365 | 382 | if (!snd_l->ackers) { |
---|
| 383 | + trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!"); |
---|
366 | 384 | tipc_link_reset(snd_l); |
---|
367 | 385 | snd_l->state = LINK_RESET; |
---|
368 | 386 | __skb_queue_purge(xmitq); |
---|
.. | .. |
---|
396 | 414 | return l->mtu; |
---|
397 | 415 | } |
---|
398 | 416 | |
---|
| 417 | +int tipc_link_mss(struct tipc_link *l) |
---|
| 418 | +{ |
---|
| 419 | +#ifdef CONFIG_TIPC_CRYPTO |
---|
| 420 | + return l->mtu - INT_H_SIZE - EMSG_OVERHEAD; |
---|
| 421 | +#else |
---|
| 422 | + return l->mtu - INT_H_SIZE; |
---|
| 423 | +#endif |
---|
| 424 | +} |
---|
| 425 | + |
---|
399 | 426 | u16 tipc_link_rcv_nxt(struct tipc_link *l) |
---|
400 | 427 | { |
---|
401 | 428 | return l->rcv_nxt; |
---|
.. | .. |
---|
418 | 445 | |
---|
419 | 446 | /** |
---|
420 | 447 | * tipc_link_create - create a new link |
---|
421 | | - * @n: pointer to associated node |
---|
| 448 | + * @net: pointer to associated network namespace |
---|
422 | 449 | * @if_name: associated interface name |
---|
423 | 450 | * @bearer_id: id (index) of associated bearer |
---|
424 | 451 | * @tolerance: link tolerance to be used by link |
---|
425 | 452 | * @net_plane: network plane (A,B,c..) this link belongs to |
---|
426 | 453 | * @mtu: mtu to be advertised by link |
---|
427 | 454 | * @priority: priority to be used by link |
---|
428 | | - * @window: send window to be used by link |
---|
| 455 | + * @min_win: minimal send window to be used by link |
---|
| 456 | + * @max_win: maximal send window to be used by link |
---|
429 | 457 | * @session: session to be used by link |
---|
430 | 458 | * @ownnode: identity of own node |
---|
431 | 459 | * @peer: node id of peer node |
---|
.. | .. |
---|
440 | 468 | */ |
---|
441 | 469 | bool tipc_link_create(struct net *net, char *if_name, int bearer_id, |
---|
442 | 470 | int tolerance, char net_plane, u32 mtu, int priority, |
---|
443 | | - int window, u32 session, u32 self, |
---|
| 471 | + u32 min_win, u32 max_win, u32 session, u32 self, |
---|
444 | 472 | u32 peer, u8 *peer_id, u16 peer_caps, |
---|
445 | 473 | struct tipc_link *bc_sndlink, |
---|
446 | 474 | struct tipc_link *bc_rcvlink, |
---|
.. | .. |
---|
484 | 512 | l->advertised_mtu = mtu; |
---|
485 | 513 | l->mtu = mtu; |
---|
486 | 514 | l->priority = priority; |
---|
487 | | - tipc_link_set_queue_limits(l, window); |
---|
| 515 | + tipc_link_set_queue_limits(l, min_win, max_win); |
---|
488 | 516 | l->ackers = 1; |
---|
489 | 517 | l->bc_sndlink = bc_sndlink; |
---|
490 | 518 | l->bc_rcvlink = bc_rcvlink; |
---|
.. | .. |
---|
494 | 522 | __skb_queue_head_init(&l->transmq); |
---|
495 | 523 | __skb_queue_head_init(&l->backlogq); |
---|
496 | 524 | __skb_queue_head_init(&l->deferdq); |
---|
| 525 | + __skb_queue_head_init(&l->failover_deferdq); |
---|
497 | 526 | skb_queue_head_init(&l->wakeupq); |
---|
498 | 527 | skb_queue_head_init(l->inputq); |
---|
499 | 528 | return true; |
---|
.. | .. |
---|
501 | 530 | |
---|
502 | 531 | /** |
---|
503 | 532 | * tipc_link_bc_create - create new link to be used for broadcast |
---|
504 | | - * @n: pointer to associated node |
---|
| 533 | + * @net: pointer to associated network namespace |
---|
505 | 534 | * @mtu: mtu to be used initially if no peers |
---|
506 | | - * @window: send window to be used |
---|
| 535 | + * @min_win: minimal send window to be used by link |
---|
| 536 | + * @max_win: maximal send window to be used by link |
---|
507 | 537 | * @inputq: queue to put messages ready for delivery |
---|
508 | 538 | * @namedq: queue to put binding table update messages ready for delivery |
---|
509 | 539 | * @link: return value, pointer to put the created link |
---|
510 | 540 | * |
---|
511 | 541 | * Returns true if link was created, otherwise false |
---|
512 | 542 | */ |
---|
513 | | -bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, |
---|
514 | | - int mtu, int window, u16 peer_caps, |
---|
| 543 | +bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id, |
---|
| 544 | + int mtu, u32 min_win, u32 max_win, u16 peer_caps, |
---|
515 | 545 | struct sk_buff_head *inputq, |
---|
516 | 546 | struct sk_buff_head *namedq, |
---|
517 | 547 | struct tipc_link *bc_sndlink, |
---|
.. | .. |
---|
519 | 549 | { |
---|
520 | 550 | struct tipc_link *l; |
---|
521 | 551 | |
---|
522 | | - if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window, |
---|
523 | | - 0, ownnode, peer, NULL, peer_caps, bc_sndlink, |
---|
524 | | - NULL, inputq, namedq, link)) |
---|
| 552 | + if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win, |
---|
| 553 | + max_win, 0, ownnode, peer, NULL, peer_caps, |
---|
| 554 | + bc_sndlink, NULL, inputq, namedq, link)) |
---|
525 | 555 | return false; |
---|
526 | 556 | |
---|
527 | 557 | l = *link; |
---|
528 | | - strcpy(l->name, tipc_bclink_name); |
---|
| 558 | + if (peer_id) { |
---|
| 559 | + char peer_str[NODE_ID_STR_LEN] = {0,}; |
---|
| 560 | + |
---|
| 561 | + tipc_nodeid2string(peer_str, peer_id); |
---|
| 562 | + if (strlen(peer_str) > 16) |
---|
| 563 | + sprintf(peer_str, "%x", peer); |
---|
| 564 | + /* Broadcast receiver link name: "broadcast-link:<peer>" */ |
---|
| 565 | + snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name, |
---|
| 566 | + peer_str); |
---|
| 567 | + } else { |
---|
| 568 | + strcpy(l->name, tipc_bclink_name); |
---|
| 569 | + } |
---|
| 570 | + trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!"); |
---|
529 | 571 | tipc_link_reset(l); |
---|
530 | 572 | l->state = LINK_RESET; |
---|
531 | 573 | l->ackers = 0; |
---|
.. | .. |
---|
537 | 579 | |
---|
538 | 580 | /* Disable replicast if even a single peer doesn't support it */ |
---|
539 | 581 | if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST)) |
---|
540 | | - tipc_bcast_disable_rcast(net); |
---|
| 582 | + tipc_bcast_toggle_rcast(net, false); |
---|
541 | 583 | |
---|
542 | 584 | return true; |
---|
543 | 585 | } |
---|
.. | .. |
---|
550 | 592 | int tipc_link_fsm_evt(struct tipc_link *l, int evt) |
---|
551 | 593 | { |
---|
552 | 594 | int rc = 0; |
---|
| 595 | + int old_state = l->state; |
---|
553 | 596 | |
---|
554 | 597 | switch (l->state) { |
---|
555 | 598 | case LINK_RESETTING: |
---|
.. | .. |
---|
696 | 739 | default: |
---|
697 | 740 | pr_err("Unknown FSM state %x in %s\n", l->state, l->name); |
---|
698 | 741 | } |
---|
| 742 | + trace_tipc_link_fsm(l->name, old_state, l->state, evt); |
---|
699 | 743 | return rc; |
---|
700 | 744 | illegal_evt: |
---|
701 | 745 | pr_err("Illegal FSM event %x in state %x on link %s\n", |
---|
702 | 746 | evt, l->state, l->name); |
---|
| 747 | + trace_tipc_link_fsm(l->name, old_state, l->state, evt); |
---|
703 | 748 | return rc; |
---|
704 | 749 | } |
---|
705 | 750 | |
---|
.. | .. |
---|
724 | 769 | if (msg_user(msg) == MSG_FRAGMENTER) { |
---|
725 | 770 | if (msg_type(msg) != FIRST_FRAGMENT) |
---|
726 | 771 | return; |
---|
727 | | - length = msg_size(msg_get_wrapped(msg)); |
---|
| 772 | + length = msg_size(msg_inner_hdr(msg)); |
---|
728 | 773 | } |
---|
729 | 774 | l->stats.msg_lengths_total += length; |
---|
730 | 775 | l->stats.msg_length_counts++; |
---|
.. | .. |
---|
744 | 789 | l->stats.msg_length_profile[6]++; |
---|
745 | 790 | } |
---|
746 | 791 | |
---|
| 792 | +/** |
---|
| 793 | + * tipc_link_too_silent - check if link is "too silent" |
---|
| 794 | + * @l: tipc link to be checked |
---|
| 795 | + * |
---|
| 796 | + * Returns true if the link 'silent_intv_cnt' is about to reach the |
---|
| 797 | + * 'abort_limit' value, otherwise false |
---|
| 798 | + */ |
---|
| 799 | +bool tipc_link_too_silent(struct tipc_link *l) |
---|
| 800 | +{ |
---|
| 801 | + return (l->silent_intv_cnt + 2 > l->abort_limit); |
---|
| 802 | +} |
---|
| 803 | + |
---|
747 | 804 | /* tipc_link_timeout - perform periodic task as instructed from node timeout |
---|
748 | 805 | */ |
---|
749 | 806 | int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) |
---|
.. | .. |
---|
757 | 814 | u16 bc_acked = l->bc_rcvlink->acked; |
---|
758 | 815 | struct tipc_mon_state *mstate = &l->mon_state; |
---|
759 | 816 | |
---|
| 817 | + trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " "); |
---|
| 818 | + trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " "); |
---|
760 | 819 | switch (l->state) { |
---|
761 | 820 | case LINK_ESTABLISHED: |
---|
762 | 821 | case LINK_SYNCHING: |
---|
.. | .. |
---|
769 | 828 | state |= l->bc_rcvlink->rcv_unacked; |
---|
770 | 829 | state |= l->rcv_unacked; |
---|
771 | 830 | state |= !skb_queue_empty(&l->transmq); |
---|
772 | | - state |= !skb_queue_empty(&l->deferdq); |
---|
773 | 831 | probe = mstate->probing; |
---|
774 | 832 | probe |= l->silent_intv_cnt; |
---|
775 | 833 | if (probe || mstate->monitoring) |
---|
776 | 834 | l->silent_intv_cnt++; |
---|
| 835 | + probe |= !skb_queue_empty(&l->deferdq); |
---|
| 836 | + if (l->snd_nxt == l->checkpoint) { |
---|
| 837 | + tipc_link_update_cwin(l, 0, 0); |
---|
| 838 | + probe = true; |
---|
| 839 | + } |
---|
| 840 | + l->checkpoint = l->snd_nxt; |
---|
777 | 841 | break; |
---|
778 | 842 | case LINK_RESET: |
---|
779 | 843 | setup = l->rst_cnt++ <= 4; |
---|
.. | .. |
---|
819 | 883 | TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr); |
---|
820 | 884 | skb_queue_tail(&l->wakeupq, skb); |
---|
821 | 885 | l->stats.link_congs++; |
---|
| 886 | + trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!"); |
---|
822 | 887 | return -ELINKCONG; |
---|
823 | 888 | } |
---|
824 | 889 | |
---|
.. | .. |
---|
857 | 922 | |
---|
858 | 923 | } |
---|
859 | 924 | |
---|
| 925 | +/** |
---|
| 926 | + * tipc_link_set_skb_retransmit_time - set the time at which retransmission of |
---|
| 927 | + * the given skb should be next attempted |
---|
| 928 | + * @skb: skb to set a future retransmission time for |
---|
| 929 | + * @l: link the skb will be transmitted on |
---|
| 930 | + */ |
---|
| 931 | +static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb, |
---|
| 932 | + struct tipc_link *l) |
---|
| 933 | +{ |
---|
| 934 | + if (link_is_bc_sndlink(l)) |
---|
| 935 | + TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; |
---|
| 936 | + else |
---|
| 937 | + TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME; |
---|
| 938 | +} |
---|
| 939 | + |
---|
860 | 940 | void tipc_link_reset(struct tipc_link *l) |
---|
861 | 941 | { |
---|
862 | 942 | struct sk_buff_head list; |
---|
.. | .. |
---|
865 | 945 | __skb_queue_head_init(&list); |
---|
866 | 946 | |
---|
867 | 947 | l->in_session = false; |
---|
| 948 | + /* Force re-synch of peer session number before establishing */ |
---|
| 949 | + l->peer_session--; |
---|
868 | 950 | l->session++; |
---|
869 | 951 | l->mtu = l->advertised_mtu; |
---|
870 | 952 | |
---|
.. | .. |
---|
879 | 961 | __skb_queue_purge(&l->transmq); |
---|
880 | 962 | __skb_queue_purge(&l->deferdq); |
---|
881 | 963 | __skb_queue_purge(&l->backlogq); |
---|
| 964 | + __skb_queue_purge(&l->failover_deferdq); |
---|
882 | 965 | for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) { |
---|
883 | 966 | l->backlog[imp].len = 0; |
---|
884 | 967 | l->backlog[imp].target_bskb = NULL; |
---|
885 | 968 | } |
---|
886 | 969 | kfree_skb(l->reasm_buf); |
---|
| 970 | + kfree_skb(l->reasm_tnlmsg); |
---|
887 | 971 | kfree_skb(l->failover_reasm_skb); |
---|
888 | 972 | l->reasm_buf = NULL; |
---|
| 973 | + l->reasm_tnlmsg = NULL; |
---|
889 | 974 | l->failover_reasm_skb = NULL; |
---|
890 | 975 | l->rcv_unacked = 0; |
---|
891 | 976 | l->snd_nxt = 1; |
---|
.. | .. |
---|
893 | 978 | l->snd_nxt_state = 1; |
---|
894 | 979 | l->rcv_nxt_state = 1; |
---|
895 | 980 | l->acked = 0; |
---|
| 981 | + l->last_gap = 0; |
---|
| 982 | + kfree(l->last_ga); |
---|
| 983 | + l->last_ga = NULL; |
---|
896 | 984 | l->silent_intv_cnt = 0; |
---|
897 | 985 | l->rst_cnt = 0; |
---|
898 | | - l->stale_cnt = 0; |
---|
899 | 986 | l->bc_peer_is_up = false; |
---|
900 | 987 | memset(&l->mon_state, 0, sizeof(l->mon_state)); |
---|
901 | 988 | tipc_link_reset_stats(l); |
---|
.. | .. |
---|
903 | 990 | |
---|
904 | 991 | /** |
---|
905 | 992 | * tipc_link_xmit(): enqueue buffer list according to queue situation |
---|
906 | | - * @link: link to use |
---|
| 993 | + * @l: link to use |
---|
907 | 994 | * @list: chain of buffers containing message |
---|
908 | 995 | * @xmitq: returned list of packets to be sent by caller |
---|
909 | 996 | * |
---|
.. | .. |
---|
914 | 1001 | int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, |
---|
915 | 1002 | struct sk_buff_head *xmitq) |
---|
916 | 1003 | { |
---|
917 | | - unsigned int maxwin = l->window; |
---|
918 | | - unsigned int mtu = l->mtu; |
---|
| 1004 | + struct sk_buff_head *backlogq = &l->backlogq; |
---|
| 1005 | + struct sk_buff_head *transmq = &l->transmq; |
---|
| 1006 | + struct sk_buff *skb, *_skb; |
---|
| 1007 | + u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; |
---|
919 | 1008 | u16 ack = l->rcv_nxt - 1; |
---|
920 | 1009 | u16 seqno = l->snd_nxt; |
---|
921 | | - u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; |
---|
922 | | - struct sk_buff_head *transmq = &l->transmq; |
---|
923 | | - struct sk_buff_head *backlogq = &l->backlogq; |
---|
924 | | - struct sk_buff *skb, *_skb, **tskb; |
---|
925 | 1010 | int pkt_cnt = skb_queue_len(list); |
---|
| 1011 | + unsigned int mss = tipc_link_mss(l); |
---|
| 1012 | + unsigned int cwin = l->window; |
---|
| 1013 | + unsigned int mtu = l->mtu; |
---|
926 | 1014 | struct tipc_msg *hdr; |
---|
| 1015 | + bool new_bundle; |
---|
927 | 1016 | int rc = 0; |
---|
928 | 1017 | int imp; |
---|
929 | 1018 | |
---|
.. | .. |
---|
932 | 1021 | |
---|
933 | 1022 | hdr = buf_msg(skb_peek(list)); |
---|
934 | 1023 | if (unlikely(msg_size(hdr) > mtu)) { |
---|
| 1024 | + pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n", |
---|
| 1025 | + skb_queue_len(list), msg_user(hdr), |
---|
| 1026 | + msg_type(hdr), msg_size(hdr), mtu); |
---|
935 | 1027 | __skb_queue_purge(list); |
---|
936 | 1028 | return -EMSGSIZE; |
---|
937 | 1029 | } |
---|
.. | .. |
---|
952 | 1044 | } |
---|
953 | 1045 | |
---|
954 | 1046 | /* Prepare each packet for sending, and add to relevant queue: */ |
---|
955 | | - while (skb_queue_len(list)) { |
---|
956 | | - skb = skb_peek(list); |
---|
957 | | - hdr = buf_msg(skb); |
---|
958 | | - msg_set_seqno(hdr, seqno); |
---|
959 | | - msg_set_ack(hdr, ack); |
---|
960 | | - msg_set_bcast_ack(hdr, bc_ack); |
---|
961 | | - |
---|
962 | | - if (likely(skb_queue_len(transmq) < maxwin)) { |
---|
| 1047 | + while ((skb = __skb_dequeue(list))) { |
---|
| 1048 | + if (likely(skb_queue_len(transmq) < cwin)) { |
---|
| 1049 | + hdr = buf_msg(skb); |
---|
| 1050 | + msg_set_seqno(hdr, seqno); |
---|
| 1051 | + msg_set_ack(hdr, ack); |
---|
| 1052 | + msg_set_bcast_ack(hdr, bc_ack); |
---|
963 | 1053 | _skb = skb_clone(skb, GFP_ATOMIC); |
---|
964 | 1054 | if (!_skb) { |
---|
| 1055 | + kfree_skb(skb); |
---|
965 | 1056 | __skb_queue_purge(list); |
---|
966 | 1057 | return -ENOBUFS; |
---|
967 | 1058 | } |
---|
968 | | - __skb_dequeue(list); |
---|
969 | 1059 | __skb_queue_tail(transmq, skb); |
---|
| 1060 | + tipc_link_set_skb_retransmit_time(skb, l); |
---|
970 | 1061 | __skb_queue_tail(xmitq, _skb); |
---|
971 | 1062 | TIPC_SKB_CB(skb)->ackers = l->ackers; |
---|
972 | 1063 | l->rcv_unacked = 0; |
---|
.. | .. |
---|
974 | 1065 | seqno++; |
---|
975 | 1066 | continue; |
---|
976 | 1067 | } |
---|
977 | | - tskb = &l->backlog[imp].target_bskb; |
---|
978 | | - if (tipc_msg_bundle(*tskb, hdr, mtu)) { |
---|
979 | | - kfree_skb(__skb_dequeue(list)); |
---|
980 | | - l->stats.sent_bundled++; |
---|
981 | | - continue; |
---|
982 | | - } |
---|
983 | | - if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) { |
---|
984 | | - kfree_skb(__skb_dequeue(list)); |
---|
985 | | - __skb_queue_tail(backlogq, *tskb); |
---|
986 | | - l->backlog[imp].len++; |
---|
987 | | - l->stats.sent_bundled++; |
---|
988 | | - l->stats.sent_bundles++; |
---|
| 1068 | + if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb, |
---|
| 1069 | + mss, l->addr, &new_bundle)) { |
---|
| 1070 | + if (skb) { |
---|
| 1071 | + /* Keep a ref. to the skb for next try */ |
---|
| 1072 | + l->backlog[imp].target_bskb = skb; |
---|
| 1073 | + l->backlog[imp].len++; |
---|
| 1074 | + __skb_queue_tail(backlogq, skb); |
---|
| 1075 | + } else { |
---|
| 1076 | + if (new_bundle) { |
---|
| 1077 | + l->stats.sent_bundles++; |
---|
| 1078 | + l->stats.sent_bundled++; |
---|
| 1079 | + } |
---|
| 1080 | + l->stats.sent_bundled++; |
---|
| 1081 | + } |
---|
989 | 1082 | continue; |
---|
990 | 1083 | } |
---|
991 | 1084 | l->backlog[imp].target_bskb = NULL; |
---|
992 | | - l->backlog[imp].len += skb_queue_len(list); |
---|
| 1085 | + l->backlog[imp].len += (1 + skb_queue_len(list)); |
---|
| 1086 | + __skb_queue_tail(backlogq, skb); |
---|
993 | 1087 | skb_queue_splice_tail_init(list, backlogq); |
---|
994 | 1088 | } |
---|
995 | 1089 | l->snd_nxt = seqno; |
---|
996 | 1090 | return rc; |
---|
997 | 1091 | } |
---|
998 | 1092 | |
---|
| 1093 | +static void tipc_link_update_cwin(struct tipc_link *l, int released, |
---|
| 1094 | + bool retransmitted) |
---|
| 1095 | +{ |
---|
| 1096 | + int bklog_len = skb_queue_len(&l->backlogq); |
---|
| 1097 | + struct sk_buff_head *txq = &l->transmq; |
---|
| 1098 | + int txq_len = skb_queue_len(txq); |
---|
| 1099 | + u16 cwin = l->window; |
---|
| 1100 | + |
---|
| 1101 | + /* Enter fast recovery */ |
---|
| 1102 | + if (unlikely(retransmitted)) { |
---|
| 1103 | + l->ssthresh = max_t(u16, l->window / 2, 300); |
---|
| 1104 | + l->window = min_t(u16, l->ssthresh, l->window); |
---|
| 1105 | + return; |
---|
| 1106 | + } |
---|
| 1107 | + /* Enter slow start */ |
---|
| 1108 | + if (unlikely(!released)) { |
---|
| 1109 | + l->ssthresh = max_t(u16, l->window / 2, 300); |
---|
| 1110 | + l->window = l->min_win; |
---|
| 1111 | + return; |
---|
| 1112 | + } |
---|
| 1113 | + /* Don't increase window if no pressure on the transmit queue */ |
---|
| 1114 | + if (txq_len + bklog_len < cwin) |
---|
| 1115 | + return; |
---|
| 1116 | + |
---|
| 1117 | + /* Don't increase window if there are holes the transmit queue */ |
---|
| 1118 | + if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len) |
---|
| 1119 | + return; |
---|
| 1120 | + |
---|
| 1121 | + l->cong_acks += released; |
---|
| 1122 | + |
---|
| 1123 | + /* Slow start */ |
---|
| 1124 | + if (cwin <= l->ssthresh) { |
---|
| 1125 | + l->window = min_t(u16, cwin + released, l->max_win); |
---|
| 1126 | + return; |
---|
| 1127 | + } |
---|
| 1128 | + /* Congestion avoidance */ |
---|
| 1129 | + if (l->cong_acks < cwin) |
---|
| 1130 | + return; |
---|
| 1131 | + l->window = min_t(u16, ++cwin, l->max_win); |
---|
| 1132 | + l->cong_acks = 0; |
---|
| 1133 | +} |
---|
| 1134 | + |
---|
999 | 1135 | static void tipc_link_advance_backlog(struct tipc_link *l, |
---|
1000 | 1136 | struct sk_buff_head *xmitq) |
---|
1001 | 1137 | { |
---|
1002 | | - struct sk_buff *skb, *_skb; |
---|
1003 | | - struct tipc_msg *hdr; |
---|
1004 | | - u16 seqno = l->snd_nxt; |
---|
1005 | | - u16 ack = l->rcv_nxt - 1; |
---|
1006 | 1138 | u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; |
---|
| 1139 | + struct sk_buff_head *txq = &l->transmq; |
---|
| 1140 | + struct sk_buff *skb, *_skb; |
---|
| 1141 | + u16 ack = l->rcv_nxt - 1; |
---|
| 1142 | + u16 seqno = l->snd_nxt; |
---|
| 1143 | + struct tipc_msg *hdr; |
---|
| 1144 | + u16 cwin = l->window; |
---|
1007 | 1145 | u32 imp; |
---|
1008 | 1146 | |
---|
1009 | | - while (skb_queue_len(&l->transmq) < l->window) { |
---|
| 1147 | + while (skb_queue_len(txq) < cwin) { |
---|
1010 | 1148 | skb = skb_peek(&l->backlogq); |
---|
1011 | 1149 | if (!skb) |
---|
1012 | 1150 | break; |
---|
.. | .. |
---|
1020 | 1158 | if (unlikely(skb == l->backlog[imp].target_bskb)) |
---|
1021 | 1159 | l->backlog[imp].target_bskb = NULL; |
---|
1022 | 1160 | __skb_queue_tail(&l->transmq, skb); |
---|
| 1161 | + tipc_link_set_skb_retransmit_time(skb, l); |
---|
| 1162 | + |
---|
1023 | 1163 | __skb_queue_tail(xmitq, _skb); |
---|
1024 | 1164 | TIPC_SKB_CB(skb)->ackers = l->ackers; |
---|
1025 | 1165 | msg_set_seqno(hdr, seqno); |
---|
.. | .. |
---|
1032 | 1172 | l->snd_nxt = seqno; |
---|
1033 | 1173 | } |
---|
1034 | 1174 | |
---|
1035 | | -static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb) |
---|
| 1175 | +/** |
---|
| 1176 | + * link_retransmit_failure() - Detect repeated retransmit failures |
---|
| 1177 | + * @l: tipc link sender |
---|
| 1178 | + * @r: tipc link receiver (= l in case of unicast) |
---|
| 1179 | + * @rc: returned code |
---|
| 1180 | + * |
---|
| 1181 | + * Return: true if the repeated retransmit failures happens, otherwise |
---|
| 1182 | + * false |
---|
| 1183 | + */ |
---|
| 1184 | +static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r, |
---|
| 1185 | + int *rc) |
---|
1036 | 1186 | { |
---|
1037 | | - struct tipc_msg *hdr = buf_msg(skb); |
---|
| 1187 | + struct sk_buff *skb = skb_peek(&l->transmq); |
---|
| 1188 | + struct tipc_msg *hdr; |
---|
| 1189 | + |
---|
| 1190 | + if (!skb) |
---|
| 1191 | + return false; |
---|
| 1192 | + |
---|
| 1193 | + if (!TIPC_SKB_CB(skb)->retr_cnt) |
---|
| 1194 | + return false; |
---|
| 1195 | + |
---|
| 1196 | + if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp + |
---|
| 1197 | + msecs_to_jiffies(r->tolerance * 10))) |
---|
| 1198 | + return false; |
---|
| 1199 | + |
---|
| 1200 | + hdr = buf_msg(skb); |
---|
| 1201 | + if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr))) |
---|
| 1202 | + return false; |
---|
1038 | 1203 | |
---|
1039 | 1204 | pr_warn("Retransmission failure on link <%s>\n", l->name); |
---|
1040 | 1205 | link_print(l, "State of link "); |
---|
1041 | 1206 | pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n", |
---|
1042 | 1207 | msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr)); |
---|
1043 | | - pr_info("sqno %u, prev: %x, src: %x\n", |
---|
1044 | | - msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr)); |
---|
1045 | | -} |
---|
| 1208 | + pr_info("sqno %u, prev: %x, dest: %x\n", |
---|
| 1209 | + msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr)); |
---|
| 1210 | + pr_info("retr_stamp %d, retr_cnt %d\n", |
---|
| 1211 | + jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp), |
---|
| 1212 | + TIPC_SKB_CB(skb)->retr_cnt); |
---|
1046 | 1213 | |
---|
1047 | | -/* tipc_link_retrans() - retransmit one or more packets |
---|
1048 | | - * @l: the link to transmit on |
---|
1049 | | - * @r: the receiving link ordering the retransmit. Same as l if unicast |
---|
1050 | | - * @from: retransmit from (inclusive) this sequence number |
---|
1051 | | - * @to: retransmit to (inclusive) this sequence number |
---|
1052 | | - * xmitq: queue for accumulating the retransmitted packets |
---|
1053 | | - */ |
---|
1054 | | -static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r, |
---|
1055 | | - u16 from, u16 to, struct sk_buff_head *xmitq) |
---|
1056 | | -{ |
---|
1057 | | - struct sk_buff *_skb, *skb = skb_peek(&l->transmq); |
---|
1058 | | - u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; |
---|
1059 | | - u16 ack = l->rcv_nxt - 1; |
---|
1060 | | - struct tipc_msg *hdr; |
---|
| 1214 | + trace_tipc_list_dump(&l->transmq, true, "retrans failure!"); |
---|
| 1215 | + trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!"); |
---|
| 1216 | + trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!"); |
---|
1061 | 1217 | |
---|
1062 | | - if (!skb) |
---|
1063 | | - return 0; |
---|
1064 | | - |
---|
1065 | | - /* Detect repeated retransmit failures on same packet */ |
---|
1066 | | - if (r->last_retransm != buf_seqno(skb)) { |
---|
1067 | | - r->last_retransm = buf_seqno(skb); |
---|
1068 | | - r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance); |
---|
1069 | | - r->stale_cnt = 0; |
---|
1070 | | - } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) { |
---|
1071 | | - link_retransmit_failure(l, skb); |
---|
1072 | | - if (link_is_bc_sndlink(l)) |
---|
1073 | | - return TIPC_LINK_DOWN_EVT; |
---|
1074 | | - return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); |
---|
| 1218 | + if (link_is_bc_sndlink(l)) { |
---|
| 1219 | + r->state = LINK_RESET; |
---|
| 1220 | + *rc |= TIPC_LINK_DOWN_EVT; |
---|
| 1221 | + } else { |
---|
| 1222 | + *rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT); |
---|
1075 | 1223 | } |
---|
1076 | 1224 | |
---|
1077 | | - skb_queue_walk(&l->transmq, skb) { |
---|
1078 | | - hdr = buf_msg(skb); |
---|
1079 | | - if (less(msg_seqno(hdr), from)) |
---|
1080 | | - continue; |
---|
1081 | | - if (more(msg_seqno(hdr), to)) |
---|
1082 | | - break; |
---|
1083 | | - _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC); |
---|
1084 | | - if (!_skb) |
---|
1085 | | - return 0; |
---|
1086 | | - hdr = buf_msg(_skb); |
---|
1087 | | - msg_set_ack(hdr, ack); |
---|
1088 | | - msg_set_bcast_ack(hdr, bc_ack); |
---|
1089 | | - _skb->priority = TC_PRIO_CONTROL; |
---|
1090 | | - __skb_queue_tail(xmitq, _skb); |
---|
1091 | | - l->stats.retransmitted++; |
---|
1092 | | - } |
---|
1093 | | - return 0; |
---|
| 1225 | + return true; |
---|
1094 | 1226 | } |
---|
1095 | 1227 | |
---|
1096 | 1228 | /* tipc_data_input - deliver data and name distr msgs to upper layer |
---|
.. | .. |
---|
1113 | 1245 | skb_queue_tail(mc_inputq, skb); |
---|
1114 | 1246 | return true; |
---|
1115 | 1247 | } |
---|
1116 | | - /* else: fall through */ |
---|
| 1248 | + fallthrough; |
---|
1117 | 1249 | case CONN_MANAGER: |
---|
1118 | 1250 | skb_queue_tail(inputq, skb); |
---|
1119 | 1251 | return true; |
---|
.. | .. |
---|
1129 | 1261 | case MSG_FRAGMENTER: |
---|
1130 | 1262 | case BCAST_PROTOCOL: |
---|
1131 | 1263 | return false; |
---|
| 1264 | +#ifdef CONFIG_TIPC_CRYPTO |
---|
| 1265 | + case MSG_CRYPTO: |
---|
| 1266 | + if (TIPC_SKB_CB(skb)->decrypted) { |
---|
| 1267 | + tipc_crypto_msg_rcv(l->net, skb); |
---|
| 1268 | + return true; |
---|
| 1269 | + } |
---|
| 1270 | + fallthrough; |
---|
| 1271 | +#endif |
---|
1132 | 1272 | default: |
---|
1133 | 1273 | pr_warn("Dropping received illegal msg type\n"); |
---|
1134 | 1274 | kfree_skb(skb); |
---|
.. | .. |
---|
1141 | 1281 | * Consumes buffer |
---|
1142 | 1282 | */ |
---|
1143 | 1283 | static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb, |
---|
1144 | | - struct sk_buff_head *inputq) |
---|
| 1284 | + struct sk_buff_head *inputq, |
---|
| 1285 | + struct sk_buff **reasm_skb) |
---|
1145 | 1286 | { |
---|
1146 | 1287 | struct tipc_msg *hdr = buf_msg(skb); |
---|
1147 | | - struct sk_buff **reasm_skb = &l->reasm_buf; |
---|
1148 | 1288 | struct sk_buff *iskb; |
---|
1149 | 1289 | struct sk_buff_head tmpq; |
---|
1150 | 1290 | int usr = msg_user(hdr); |
---|
1151 | | - int rc = 0; |
---|
1152 | 1291 | int pos = 0; |
---|
1153 | | - int ipos = 0; |
---|
1154 | | - |
---|
1155 | | - if (unlikely(usr == TUNNEL_PROTOCOL)) { |
---|
1156 | | - if (msg_type(hdr) == SYNCH_MSG) { |
---|
1157 | | - __skb_queue_purge(&l->deferdq); |
---|
1158 | | - goto drop; |
---|
1159 | | - } |
---|
1160 | | - if (!tipc_msg_extract(skb, &iskb, &ipos)) |
---|
1161 | | - return rc; |
---|
1162 | | - kfree_skb(skb); |
---|
1163 | | - skb = iskb; |
---|
1164 | | - hdr = buf_msg(skb); |
---|
1165 | | - if (less(msg_seqno(hdr), l->drop_point)) |
---|
1166 | | - goto drop; |
---|
1167 | | - if (tipc_data_input(l, skb, inputq)) |
---|
1168 | | - return rc; |
---|
1169 | | - usr = msg_user(hdr); |
---|
1170 | | - reasm_skb = &l->failover_reasm_skb; |
---|
1171 | | - } |
---|
1172 | 1292 | |
---|
1173 | 1293 | if (usr == MSG_BUNDLER) { |
---|
1174 | 1294 | skb_queue_head_init(&tmpq); |
---|
.. | .. |
---|
1193 | 1313 | tipc_link_bc_init_rcv(l->bc_rcvlink, hdr); |
---|
1194 | 1314 | tipc_bcast_unlock(l->net); |
---|
1195 | 1315 | } |
---|
1196 | | -drop: |
---|
| 1316 | + |
---|
1197 | 1317 | kfree_skb(skb); |
---|
1198 | 1318 | return 0; |
---|
1199 | 1319 | } |
---|
1200 | 1320 | |
---|
1201 | | -static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked) |
---|
| 1321 | +/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the |
---|
| 1322 | + * inner message along with the ones in the old link's |
---|
| 1323 | + * deferdq |
---|
| 1324 | + * @l: tunnel link |
---|
| 1325 | + * @skb: TUNNEL_PROTOCOL message |
---|
| 1326 | + * @inputq: queue to put messages ready for delivery |
---|
| 1327 | + */ |
---|
| 1328 | +static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb, |
---|
| 1329 | + struct sk_buff_head *inputq) |
---|
1202 | 1330 | { |
---|
1203 | | - bool released = false; |
---|
1204 | | - struct sk_buff *skb, *tmp; |
---|
| 1331 | + struct sk_buff **reasm_skb = &l->failover_reasm_skb; |
---|
| 1332 | + struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg; |
---|
| 1333 | + struct sk_buff_head *fdefq = &l->failover_deferdq; |
---|
| 1334 | + struct tipc_msg *hdr = buf_msg(skb); |
---|
| 1335 | + struct sk_buff *iskb; |
---|
| 1336 | + int ipos = 0; |
---|
| 1337 | + int rc = 0; |
---|
| 1338 | + u16 seqno; |
---|
1205 | 1339 | |
---|
1206 | | - skb_queue_walk_safe(&l->transmq, skb, tmp) { |
---|
1207 | | - if (more(buf_seqno(skb), acked)) |
---|
1208 | | - break; |
---|
1209 | | - __skb_unlink(skb, &l->transmq); |
---|
| 1340 | + if (msg_type(hdr) == SYNCH_MSG) { |
---|
1210 | 1341 | kfree_skb(skb); |
---|
1211 | | - released = true; |
---|
| 1342 | + return 0; |
---|
1212 | 1343 | } |
---|
1213 | | - return released; |
---|
| 1344 | + |
---|
| 1345 | + /* Not a fragment? */ |
---|
| 1346 | + if (likely(!msg_nof_fragms(hdr))) { |
---|
| 1347 | + if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) { |
---|
| 1348 | + pr_warn_ratelimited("Unable to extract msg, defq: %d\n", |
---|
| 1349 | + skb_queue_len(fdefq)); |
---|
| 1350 | + return 0; |
---|
| 1351 | + } |
---|
| 1352 | + kfree_skb(skb); |
---|
| 1353 | + } else { |
---|
| 1354 | + /* Set fragment type for buf_append */ |
---|
| 1355 | + if (msg_fragm_no(hdr) == 1) |
---|
| 1356 | + msg_set_type(hdr, FIRST_FRAGMENT); |
---|
| 1357 | + else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr)) |
---|
| 1358 | + msg_set_type(hdr, FRAGMENT); |
---|
| 1359 | + else |
---|
| 1360 | + msg_set_type(hdr, LAST_FRAGMENT); |
---|
| 1361 | + |
---|
| 1362 | + if (!tipc_buf_append(reasm_tnlmsg, &skb)) { |
---|
| 1363 | + /* Successful but non-complete reassembly? */ |
---|
| 1364 | + if (*reasm_tnlmsg || link_is_bc_rcvlink(l)) |
---|
| 1365 | + return 0; |
---|
| 1366 | + pr_warn_ratelimited("Unable to reassemble tunnel msg\n"); |
---|
| 1367 | + return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); |
---|
| 1368 | + } |
---|
| 1369 | + iskb = skb; |
---|
| 1370 | + } |
---|
| 1371 | + |
---|
| 1372 | + do { |
---|
| 1373 | + seqno = buf_seqno(iskb); |
---|
| 1374 | + if (unlikely(less(seqno, l->drop_point))) { |
---|
| 1375 | + kfree_skb(iskb); |
---|
| 1376 | + continue; |
---|
| 1377 | + } |
---|
| 1378 | + if (unlikely(seqno != l->drop_point)) { |
---|
| 1379 | + __tipc_skb_queue_sorted(fdefq, seqno, iskb); |
---|
| 1380 | + continue; |
---|
| 1381 | + } |
---|
| 1382 | + |
---|
| 1383 | + l->drop_point++; |
---|
| 1384 | + if (!tipc_data_input(l, iskb, inputq)) |
---|
| 1385 | + rc |= tipc_link_input(l, iskb, inputq, reasm_skb); |
---|
| 1386 | + if (unlikely(rc)) |
---|
| 1387 | + break; |
---|
| 1388 | + } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point))); |
---|
| 1389 | + |
---|
| 1390 | + return rc; |
---|
| 1391 | +} |
---|
| 1392 | + |
---|
| 1393 | +/** |
---|
| 1394 | + * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG |
---|
| 1395 | + * @ga: returned pointer to the Gap ACK blocks if any |
---|
| 1396 | + * @l: the tipc link |
---|
| 1397 | + * @hdr: the PROTOCOL/STATE_MSG header |
---|
| 1398 | + * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0) |
---|
| 1399 | + * |
---|
| 1400 | + * Return: the total Gap ACK blocks size |
---|
| 1401 | + */ |
---|
| 1402 | +u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l, |
---|
| 1403 | + struct tipc_msg *hdr, bool uc) |
---|
| 1404 | +{ |
---|
| 1405 | + struct tipc_gap_ack_blks *p; |
---|
| 1406 | + u16 sz = 0; |
---|
| 1407 | + |
---|
| 1408 | + /* Does peer support the Gap ACK blocks feature? */ |
---|
| 1409 | + if (l->peer_caps & TIPC_GAP_ACK_BLOCK) { |
---|
| 1410 | + p = (struct tipc_gap_ack_blks *)msg_data(hdr); |
---|
| 1411 | + sz = ntohs(p->len); |
---|
| 1412 | + /* Sanity check */ |
---|
| 1413 | + if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) { |
---|
| 1414 | + /* Good, check if the desired type exists */ |
---|
| 1415 | + if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt)) |
---|
| 1416 | + goto ok; |
---|
| 1417 | + /* Backward compatible: peer might not support bc, but uc? */ |
---|
| 1418 | + } else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) { |
---|
| 1419 | + if (p->ugack_cnt) { |
---|
| 1420 | + p->bgack_cnt = 0; |
---|
| 1421 | + goto ok; |
---|
| 1422 | + } |
---|
| 1423 | + } |
---|
| 1424 | + } |
---|
| 1425 | + /* Other cases: ignore! */ |
---|
| 1426 | + p = NULL; |
---|
| 1427 | + |
---|
| 1428 | +ok: |
---|
| 1429 | + *ga = p; |
---|
| 1430 | + return sz; |
---|
| 1431 | +} |
---|
| 1432 | + |
---|
| 1433 | +static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga, |
---|
| 1434 | + struct tipc_link *l, u8 start_index) |
---|
| 1435 | +{ |
---|
| 1436 | + struct tipc_gap_ack *gacks = &ga->gacks[start_index]; |
---|
| 1437 | + struct sk_buff *skb = skb_peek(&l->deferdq); |
---|
| 1438 | + u16 expect, seqno = 0; |
---|
| 1439 | + u8 n = 0; |
---|
| 1440 | + |
---|
| 1441 | + if (!skb) |
---|
| 1442 | + return 0; |
---|
| 1443 | + |
---|
| 1444 | + expect = buf_seqno(skb); |
---|
| 1445 | + skb_queue_walk(&l->deferdq, skb) { |
---|
| 1446 | + seqno = buf_seqno(skb); |
---|
| 1447 | + if (unlikely(more(seqno, expect))) { |
---|
| 1448 | + gacks[n].ack = htons(expect - 1); |
---|
| 1449 | + gacks[n].gap = htons(seqno - expect); |
---|
| 1450 | + if (++n >= MAX_GAP_ACK_BLKS / 2) { |
---|
| 1451 | + pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n", |
---|
| 1452 | + l->name, n, |
---|
| 1453 | + skb_queue_len(&l->deferdq)); |
---|
| 1454 | + return n; |
---|
| 1455 | + } |
---|
| 1456 | + } else if (unlikely(less(seqno, expect))) { |
---|
| 1457 | + pr_warn("Unexpected skb in deferdq!\n"); |
---|
| 1458 | + continue; |
---|
| 1459 | + } |
---|
| 1460 | + expect = seqno + 1; |
---|
| 1461 | + } |
---|
| 1462 | + |
---|
| 1463 | + /* last block */ |
---|
| 1464 | + gacks[n].ack = htons(seqno); |
---|
| 1465 | + gacks[n].gap = 0; |
---|
| 1466 | + n++; |
---|
| 1467 | + return n; |
---|
| 1468 | +} |
---|
| 1469 | + |
---|
| 1470 | +/* tipc_build_gap_ack_blks - build Gap ACK blocks |
---|
| 1471 | + * @l: tipc unicast link |
---|
| 1472 | + * @hdr: the tipc message buffer to store the Gap ACK blocks after built |
---|
| 1473 | + * |
---|
| 1474 | + * The function builds Gap ACK blocks for both the unicast & broadcast receiver |
---|
| 1475 | + * links of a certain peer, the buffer after built has the network data format |
---|
| 1476 | + * as found at the struct tipc_gap_ack_blks definition. |
---|
| 1477 | + * |
---|
| 1478 | + * returns the actual allocated memory size |
---|
| 1479 | + */ |
---|
| 1480 | +static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr) |
---|
| 1481 | +{ |
---|
| 1482 | + struct tipc_link *bcl = l->bc_rcvlink; |
---|
| 1483 | + struct tipc_gap_ack_blks *ga; |
---|
| 1484 | + u16 len; |
---|
| 1485 | + |
---|
| 1486 | + ga = (struct tipc_gap_ack_blks *)msg_data(hdr); |
---|
| 1487 | + |
---|
| 1488 | + /* Start with broadcast link first */ |
---|
| 1489 | + tipc_bcast_lock(bcl->net); |
---|
| 1490 | + msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1); |
---|
| 1491 | + msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl)); |
---|
| 1492 | + ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0); |
---|
| 1493 | + tipc_bcast_unlock(bcl->net); |
---|
| 1494 | + |
---|
| 1495 | + /* Now for unicast link, but an explicit NACK only (???) */ |
---|
| 1496 | + ga->ugack_cnt = (msg_seq_gap(hdr)) ? |
---|
| 1497 | + __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0; |
---|
| 1498 | + |
---|
| 1499 | + /* Total len */ |
---|
| 1500 | + len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt); |
---|
| 1501 | + ga->len = htons(len); |
---|
| 1502 | + return len; |
---|
| 1503 | +} |
---|
| 1504 | + |
---|
| 1505 | +/* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing |
---|
| 1506 | + * acked packets, also doing retransmissions if |
---|
| 1507 | + * gaps found |
---|
| 1508 | + * @l: tipc link with transmq queue to be advanced |
---|
| 1509 | + * @r: tipc link "receiver" i.e. in case of broadcast (= "l" if unicast) |
---|
| 1510 | + * @acked: seqno of last packet acked by peer without any gaps before |
---|
| 1511 | + * @gap: # of gap packets |
---|
| 1512 | + * @ga: buffer pointer to Gap ACK blocks from peer |
---|
| 1513 | + * @xmitq: queue for accumulating the retransmitted packets if any |
---|
| 1514 | + * @retransmitted: returned boolean value if a retransmission is really issued |
---|
| 1515 | + * @rc: returned code e.g. TIPC_LINK_DOWN_EVT if a repeated retransmit failures |
---|
| 1516 | + * happens (- unlikely case) |
---|
| 1517 | + * |
---|
| 1518 | + * Return: the number of packets released from the link transmq |
---|
| 1519 | + */ |
---|
| 1520 | +static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r, |
---|
| 1521 | + u16 acked, u16 gap, |
---|
| 1522 | + struct tipc_gap_ack_blks *ga, |
---|
| 1523 | + struct sk_buff_head *xmitq, |
---|
| 1524 | + bool *retransmitted, int *rc) |
---|
| 1525 | +{ |
---|
| 1526 | + struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL; |
---|
| 1527 | + struct tipc_gap_ack *gacks = NULL; |
---|
| 1528 | + struct sk_buff *skb, *_skb, *tmp; |
---|
| 1529 | + struct tipc_msg *hdr; |
---|
| 1530 | + u32 qlen = skb_queue_len(&l->transmq); |
---|
| 1531 | + u16 nacked = acked, ngap = gap, gack_cnt = 0; |
---|
| 1532 | + u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; |
---|
| 1533 | + u16 ack = l->rcv_nxt - 1; |
---|
| 1534 | + u16 seqno, n = 0; |
---|
| 1535 | + u16 end = r->acked, start = end, offset = r->last_gap; |
---|
| 1536 | + u16 si = (last_ga) ? last_ga->start_index : 0; |
---|
| 1537 | + bool is_uc = !link_is_bc_sndlink(l); |
---|
| 1538 | + bool bc_has_acked = false; |
---|
| 1539 | + |
---|
| 1540 | + trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq); |
---|
| 1541 | + |
---|
| 1542 | + /* Determine Gap ACK blocks if any for the particular link */ |
---|
| 1543 | + if (ga && is_uc) { |
---|
| 1544 | + /* Get the Gap ACKs, uc part */ |
---|
| 1545 | + gack_cnt = ga->ugack_cnt; |
---|
| 1546 | + gacks = &ga->gacks[ga->bgack_cnt]; |
---|
| 1547 | + } else if (ga) { |
---|
| 1548 | + /* Copy the Gap ACKs, bc part, for later renewal if needed */ |
---|
| 1549 | + this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt), |
---|
| 1550 | + GFP_ATOMIC); |
---|
| 1551 | + if (likely(this_ga)) { |
---|
| 1552 | + this_ga->start_index = 0; |
---|
| 1553 | + /* Start with the bc Gap ACKs */ |
---|
| 1554 | + gack_cnt = this_ga->bgack_cnt; |
---|
| 1555 | + gacks = &this_ga->gacks[0]; |
---|
| 1556 | + } else { |
---|
| 1557 | + /* Hmm, we can get in trouble..., simply ignore it */ |
---|
| 1558 | + pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n"); |
---|
| 1559 | + } |
---|
| 1560 | + } |
---|
| 1561 | + |
---|
| 1562 | + /* Advance the link transmq */ |
---|
| 1563 | + skb_queue_walk_safe(&l->transmq, skb, tmp) { |
---|
| 1564 | + seqno = buf_seqno(skb); |
---|
| 1565 | + |
---|
| 1566 | +next_gap_ack: |
---|
| 1567 | + if (less_eq(seqno, nacked)) { |
---|
| 1568 | + if (is_uc) |
---|
| 1569 | + goto release; |
---|
| 1570 | + /* Skip packets peer has already acked */ |
---|
| 1571 | + if (!more(seqno, r->acked)) |
---|
| 1572 | + continue; |
---|
| 1573 | + /* Get the next of last Gap ACK blocks */ |
---|
| 1574 | + while (more(seqno, end)) { |
---|
| 1575 | + if (!last_ga || si >= last_ga->bgack_cnt) |
---|
| 1576 | + break; |
---|
| 1577 | + start = end + offset + 1; |
---|
| 1578 | + end = ntohs(last_ga->gacks[si].ack); |
---|
| 1579 | + offset = ntohs(last_ga->gacks[si].gap); |
---|
| 1580 | + si++; |
---|
| 1581 | + WARN_ONCE(more(start, end) || |
---|
| 1582 | + (!offset && |
---|
| 1583 | + si < last_ga->bgack_cnt) || |
---|
| 1584 | + si > MAX_GAP_ACK_BLKS, |
---|
| 1585 | + "Corrupted Gap ACK: %d %d %d %d %d\n", |
---|
| 1586 | + start, end, offset, si, |
---|
| 1587 | + last_ga->bgack_cnt); |
---|
| 1588 | + } |
---|
| 1589 | + /* Check against the last Gap ACK block */ |
---|
| 1590 | + if (in_range(seqno, start, end)) |
---|
| 1591 | + continue; |
---|
| 1592 | + /* Update/release the packet peer is acking */ |
---|
| 1593 | + bc_has_acked = true; |
---|
| 1594 | + if (--TIPC_SKB_CB(skb)->ackers) |
---|
| 1595 | + continue; |
---|
| 1596 | +release: |
---|
| 1597 | + /* release skb */ |
---|
| 1598 | + __skb_unlink(skb, &l->transmq); |
---|
| 1599 | + kfree_skb(skb); |
---|
| 1600 | + } else if (less_eq(seqno, nacked + ngap)) { |
---|
| 1601 | + /* First gap: check if repeated retrans failures? */ |
---|
| 1602 | + if (unlikely(seqno == acked + 1 && |
---|
| 1603 | + link_retransmit_failure(l, r, rc))) { |
---|
| 1604 | + /* Ignore this bc Gap ACKs if any */ |
---|
| 1605 | + kfree(this_ga); |
---|
| 1606 | + this_ga = NULL; |
---|
| 1607 | + break; |
---|
| 1608 | + } |
---|
| 1609 | + /* retransmit skb if unrestricted*/ |
---|
| 1610 | + if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) |
---|
| 1611 | + continue; |
---|
| 1612 | + tipc_link_set_skb_retransmit_time(skb, l); |
---|
| 1613 | + _skb = pskb_copy(skb, GFP_ATOMIC); |
---|
| 1614 | + if (!_skb) |
---|
| 1615 | + continue; |
---|
| 1616 | + hdr = buf_msg(_skb); |
---|
| 1617 | + msg_set_ack(hdr, ack); |
---|
| 1618 | + msg_set_bcast_ack(hdr, bc_ack); |
---|
| 1619 | + _skb->priority = TC_PRIO_CONTROL; |
---|
| 1620 | + __skb_queue_tail(xmitq, _skb); |
---|
| 1621 | + l->stats.retransmitted++; |
---|
| 1622 | + if (!is_uc) |
---|
| 1623 | + r->stats.retransmitted++; |
---|
| 1624 | + *retransmitted = true; |
---|
| 1625 | + /* Increase actual retrans counter & mark first time */ |
---|
| 1626 | + if (!TIPC_SKB_CB(skb)->retr_cnt++) |
---|
| 1627 | + TIPC_SKB_CB(skb)->retr_stamp = jiffies; |
---|
| 1628 | + } else { |
---|
| 1629 | + /* retry with Gap ACK blocks if any */ |
---|
| 1630 | + if (n >= gack_cnt) |
---|
| 1631 | + break; |
---|
| 1632 | + nacked = ntohs(gacks[n].ack); |
---|
| 1633 | + ngap = ntohs(gacks[n].gap); |
---|
| 1634 | + n++; |
---|
| 1635 | + goto next_gap_ack; |
---|
| 1636 | + } |
---|
| 1637 | + } |
---|
| 1638 | + |
---|
| 1639 | + /* Renew last Gap ACK blocks for bc if needed */ |
---|
| 1640 | + if (bc_has_acked) { |
---|
| 1641 | + if (this_ga) { |
---|
| 1642 | + kfree(last_ga); |
---|
| 1643 | + r->last_ga = this_ga; |
---|
| 1644 | + r->last_gap = gap; |
---|
| 1645 | + } else if (last_ga) { |
---|
| 1646 | + if (less(acked, start)) { |
---|
| 1647 | + si--; |
---|
| 1648 | + offset = start - acked - 1; |
---|
| 1649 | + } else if (less(acked, end)) { |
---|
| 1650 | + acked = end; |
---|
| 1651 | + } |
---|
| 1652 | + if (si < last_ga->bgack_cnt) { |
---|
| 1653 | + last_ga->start_index = si; |
---|
| 1654 | + r->last_gap = offset; |
---|
| 1655 | + } else { |
---|
| 1656 | + kfree(last_ga); |
---|
| 1657 | + r->last_ga = NULL; |
---|
| 1658 | + r->last_gap = 0; |
---|
| 1659 | + } |
---|
| 1660 | + } else { |
---|
| 1661 | + r->last_gap = 0; |
---|
| 1662 | + } |
---|
| 1663 | + r->acked = acked; |
---|
| 1664 | + } else { |
---|
| 1665 | + kfree(this_ga); |
---|
| 1666 | + } |
---|
| 1667 | + |
---|
| 1668 | + return qlen - skb_queue_len(&l->transmq); |
---|
1214 | 1669 | } |
---|
1215 | 1670 | |
---|
1216 | 1671 | /* tipc_link_build_state_msg: prepare link state message for transmission |
---|
.. | .. |
---|
1233 | 1688 | l->snd_nxt = l->rcv_nxt; |
---|
1234 | 1689 | return TIPC_LINK_SND_STATE; |
---|
1235 | 1690 | } |
---|
1236 | | - |
---|
1237 | 1691 | /* Unicast ACK */ |
---|
1238 | 1692 | l->rcv_unacked = 0; |
---|
1239 | 1693 | l->stats.sent_acks++; |
---|
.. | .. |
---|
1267 | 1721 | struct sk_buff_head *xmitq) |
---|
1268 | 1722 | { |
---|
1269 | 1723 | u32 def_cnt = ++l->stats.deferred_recv; |
---|
| 1724 | + struct sk_buff_head *dfq = &l->deferdq; |
---|
| 1725 | + u32 defq_len = skb_queue_len(dfq); |
---|
1270 | 1726 | int match1, match2; |
---|
1271 | 1727 | |
---|
1272 | 1728 | if (link_is_bc_rcvlink(l)) { |
---|
.. | .. |
---|
1277 | 1733 | return 0; |
---|
1278 | 1734 | } |
---|
1279 | 1735 | |
---|
1280 | | - if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV)) |
---|
1281 | | - tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq); |
---|
| 1736 | + if (defq_len >= 3 && !((defq_len - 3) % 16)) { |
---|
| 1737 | + u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt; |
---|
| 1738 | + |
---|
| 1739 | + tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, |
---|
| 1740 | + rcvgap, 0, 0, xmitq); |
---|
| 1741 | + } |
---|
1282 | 1742 | return 0; |
---|
1283 | 1743 | } |
---|
1284 | 1744 | |
---|
.. | .. |
---|
1291 | 1751 | struct sk_buff_head *xmitq) |
---|
1292 | 1752 | { |
---|
1293 | 1753 | struct sk_buff_head *defq = &l->deferdq; |
---|
1294 | | - struct tipc_msg *hdr; |
---|
| 1754 | + struct tipc_msg *hdr = buf_msg(skb); |
---|
1295 | 1755 | u16 seqno, rcv_nxt, win_lim; |
---|
| 1756 | + int released = 0; |
---|
1296 | 1757 | int rc = 0; |
---|
| 1758 | + |
---|
| 1759 | + /* Verify and update link state */ |
---|
| 1760 | + if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) |
---|
| 1761 | + return tipc_link_proto_rcv(l, skb, xmitq); |
---|
| 1762 | + |
---|
| 1763 | + /* Don't send probe at next timeout expiration */ |
---|
| 1764 | + l->silent_intv_cnt = 0; |
---|
1297 | 1765 | |
---|
1298 | 1766 | do { |
---|
1299 | 1767 | hdr = buf_msg(skb); |
---|
.. | .. |
---|
1301 | 1769 | rcv_nxt = l->rcv_nxt; |
---|
1302 | 1770 | win_lim = rcv_nxt + TIPC_MAX_LINK_WIN; |
---|
1303 | 1771 | |
---|
1304 | | - /* Verify and update link state */ |
---|
1305 | | - if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) |
---|
1306 | | - return tipc_link_proto_rcv(l, skb, xmitq); |
---|
1307 | | - |
---|
1308 | 1772 | if (unlikely(!link_is_up(l))) { |
---|
1309 | 1773 | if (l->state == LINK_ESTABLISHING) |
---|
1310 | 1774 | rc = TIPC_LINK_UP_EVT; |
---|
1311 | | - goto drop; |
---|
| 1775 | + kfree_skb(skb); |
---|
| 1776 | + break; |
---|
1312 | 1777 | } |
---|
1313 | | - |
---|
1314 | | - /* Don't send probe at next timeout expiration */ |
---|
1315 | | - l->silent_intv_cnt = 0; |
---|
1316 | 1778 | |
---|
1317 | 1779 | /* Drop if outside receive window */ |
---|
1318 | 1780 | if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) { |
---|
1319 | 1781 | l->stats.duplicates++; |
---|
1320 | | - goto drop; |
---|
| 1782 | + kfree_skb(skb); |
---|
| 1783 | + break; |
---|
1321 | 1784 | } |
---|
1322 | | - |
---|
1323 | | - /* Forward queues and wake up waiting users */ |
---|
1324 | | - if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) { |
---|
1325 | | - l->stale_cnt = 0; |
---|
1326 | | - tipc_link_advance_backlog(l, xmitq); |
---|
1327 | | - if (unlikely(!skb_queue_empty(&l->wakeupq))) |
---|
1328 | | - link_prepare_wakeup(l); |
---|
1329 | | - } |
---|
| 1785 | + released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0, |
---|
| 1786 | + NULL, NULL, NULL, NULL); |
---|
1330 | 1787 | |
---|
1331 | 1788 | /* Defer delivery if sequence gap */ |
---|
1332 | 1789 | if (unlikely(seqno != rcv_nxt)) { |
---|
1333 | | - __tipc_skb_queue_sorted(defq, seqno, skb); |
---|
| 1790 | + if (!__tipc_skb_queue_sorted(defq, seqno, skb)) |
---|
| 1791 | + l->stats.duplicates++; |
---|
1334 | 1792 | rc |= tipc_link_build_nack_msg(l, xmitq); |
---|
1335 | 1793 | break; |
---|
1336 | 1794 | } |
---|
.. | .. |
---|
1338 | 1796 | /* Deliver packet */ |
---|
1339 | 1797 | l->rcv_nxt++; |
---|
1340 | 1798 | l->stats.recv_pkts++; |
---|
1341 | | - if (!tipc_data_input(l, skb, l->inputq)) |
---|
1342 | | - rc |= tipc_link_input(l, skb, l->inputq); |
---|
| 1799 | + |
---|
| 1800 | + if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL)) |
---|
| 1801 | + rc |= tipc_link_tnl_rcv(l, skb, l->inputq); |
---|
| 1802 | + else if (!tipc_data_input(l, skb, l->inputq)) |
---|
| 1803 | + rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf); |
---|
1343 | 1804 | if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) |
---|
1344 | 1805 | rc |= tipc_link_build_state_msg(l, xmitq); |
---|
1345 | 1806 | if (unlikely(rc & ~TIPC_LINK_SND_STATE)) |
---|
1346 | 1807 | break; |
---|
1347 | | - } while ((skb = __skb_dequeue(defq))); |
---|
| 1808 | + } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt))); |
---|
1348 | 1809 | |
---|
1349 | | - return rc; |
---|
1350 | | -drop: |
---|
1351 | | - kfree_skb(skb); |
---|
| 1810 | + /* Forward queues and wake up waiting users */ |
---|
| 1811 | + if (released) { |
---|
| 1812 | + tipc_link_update_cwin(l, released, 0); |
---|
| 1813 | + tipc_link_advance_backlog(l, xmitq); |
---|
| 1814 | + if (unlikely(!skb_queue_empty(&l->wakeupq))) |
---|
| 1815 | + link_prepare_wakeup(l); |
---|
| 1816 | + } |
---|
1352 | 1817 | return rc; |
---|
1353 | 1818 | } |
---|
1354 | 1819 | |
---|
.. | .. |
---|
1357 | 1822 | int tolerance, int priority, |
---|
1358 | 1823 | struct sk_buff_head *xmitq) |
---|
1359 | 1824 | { |
---|
1360 | | - struct tipc_link *bcl = l->bc_rcvlink; |
---|
1361 | | - struct sk_buff *skb; |
---|
1362 | | - struct tipc_msg *hdr; |
---|
1363 | | - struct sk_buff_head *dfq = &l->deferdq; |
---|
1364 | | - bool node_up = link_is_up(bcl); |
---|
1365 | 1825 | struct tipc_mon_state *mstate = &l->mon_state; |
---|
| 1826 | + struct sk_buff_head *dfq = &l->deferdq; |
---|
| 1827 | + struct tipc_link *bcl = l->bc_rcvlink; |
---|
| 1828 | + struct tipc_msg *hdr; |
---|
| 1829 | + struct sk_buff *skb; |
---|
| 1830 | + bool node_up = link_is_up(bcl); |
---|
| 1831 | + u16 glen = 0, bc_rcvgap = 0; |
---|
1366 | 1832 | int dlen = 0; |
---|
1367 | 1833 | void *data; |
---|
1368 | 1834 | |
---|
.. | .. |
---|
1373 | 1839 | if (!tipc_link_is_up(l) && (mtyp == STATE_MSG)) |
---|
1374 | 1840 | return; |
---|
1375 | 1841 | |
---|
1376 | | - if (!skb_queue_empty(dfq)) |
---|
| 1842 | + if ((probe || probe_reply) && !skb_queue_empty(dfq)) |
---|
1377 | 1843 | rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt; |
---|
1378 | 1844 | |
---|
1379 | 1845 | skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE, |
---|
1380 | | - tipc_max_domain_size, l->addr, |
---|
1381 | | - tipc_own_addr(l->net), 0, 0, 0); |
---|
| 1846 | + tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ, |
---|
| 1847 | + l->addr, tipc_own_addr(l->net), 0, 0, 0); |
---|
1382 | 1848 | if (!skb) |
---|
1383 | 1849 | return; |
---|
1384 | 1850 | |
---|
.. | .. |
---|
1402 | 1868 | if (l->peer_caps & TIPC_LINK_PROTO_SEQNO) |
---|
1403 | 1869 | msg_set_seqno(hdr, l->snd_nxt_state++); |
---|
1404 | 1870 | msg_set_seq_gap(hdr, rcvgap); |
---|
1405 | | - msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl)); |
---|
| 1871 | + bc_rcvgap = link_bc_rcv_gap(bcl); |
---|
| 1872 | + msg_set_bc_gap(hdr, bc_rcvgap); |
---|
1406 | 1873 | msg_set_probe(hdr, probe); |
---|
1407 | 1874 | msg_set_is_keepalive(hdr, probe || probe_reply); |
---|
1408 | | - tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id); |
---|
1409 | | - msg_set_size(hdr, INT_H_SIZE + dlen); |
---|
1410 | | - skb_trim(skb, INT_H_SIZE + dlen); |
---|
| 1875 | + if (l->peer_caps & TIPC_GAP_ACK_BLOCK) |
---|
| 1876 | + glen = tipc_build_gap_ack_blks(l, hdr); |
---|
| 1877 | + tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id); |
---|
| 1878 | + msg_set_size(hdr, INT_H_SIZE + glen + dlen); |
---|
| 1879 | + skb_trim(skb, INT_H_SIZE + glen + dlen); |
---|
1411 | 1880 | l->stats.sent_states++; |
---|
1412 | 1881 | l->rcv_unacked = 0; |
---|
1413 | 1882 | } else { |
---|
1414 | 1883 | /* RESET_MSG or ACTIVATE_MSG */ |
---|
| 1884 | + if (mtyp == ACTIVATE_MSG) { |
---|
| 1885 | + msg_set_dest_session_valid(hdr, 1); |
---|
| 1886 | + msg_set_dest_session(hdr, l->peer_session); |
---|
| 1887 | + } |
---|
1415 | 1888 | msg_set_max_pkt(hdr, l->advertised_mtu); |
---|
1416 | 1889 | strcpy(data, l->if_name); |
---|
1417 | 1890 | msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); |
---|
.. | .. |
---|
1421 | 1894 | l->stats.sent_probes++; |
---|
1422 | 1895 | if (rcvgap) |
---|
1423 | 1896 | l->stats.sent_nacks++; |
---|
| 1897 | + if (bc_rcvgap) |
---|
| 1898 | + bcl->stats.sent_nacks++; |
---|
1424 | 1899 | skb->priority = TC_PRIO_CONTROL; |
---|
1425 | 1900 | __skb_queue_tail(xmitq, skb); |
---|
| 1901 | + trace_tipc_proto_build(skb, false, l->name); |
---|
1426 | 1902 | } |
---|
1427 | 1903 | |
---|
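When the peer advertises TIPC_GAP_ACK_BLOCK, the STATE_MSG data area built above carries the Gap ACK blocks first and the monitor domain record immediately after them, so the message is trimmed to INT_H_SIZE + glen + dlen. A small sketch of that packing arithmetic; the helper name and buffer handling are illustrative, not the kernel API:

```c
/* Sketch: pack the STATE_MSG data area as [Gap ACK blocks][monitor
 * record] and report the size the skb is trimmed to, mirroring the
 * msg_set_size()/skb_trim() arithmetic above.
 */
#include <stdint.h>
#include <string.h>

#define INT_H_SIZE 40	/* TIPC internal protocol header size */

static size_t pack_state_data(uint8_t *data,
			      const uint8_t *gap_ack, size_t glen,
			      const uint8_t *mon_rec, size_t dlen)
{
	if (glen)				/* only if the peer supports it */
		memcpy(data, gap_ack, glen);
	memcpy(data + glen, mon_rec, dlen);	/* monitor record after blocks */
	return INT_H_SIZE + glen + dlen;	/* final on-wire message size */
}
```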
1428 | 1904 | void tipc_link_create_dummy_tnl_msg(struct tipc_link *l, |
---|
.. | .. |
---|
1461 | 1937 | void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, |
---|
1462 | 1938 | int mtyp, struct sk_buff_head *xmitq) |
---|
1463 | 1939 | { |
---|
| 1940 | + struct sk_buff_head *fdefq = &tnl->failover_deferdq; |
---|
1464 | 1941 | struct sk_buff *skb, *tnlskb; |
---|
1465 | 1942 | struct tipc_msg *hdr, tnlhdr; |
---|
1466 | 1943 | struct sk_buff_head *queue = &l->transmq; |
---|
1467 | | - struct sk_buff_head tmpxq, tnlq; |
---|
| 1944 | + struct sk_buff_head tmpxq, tnlq, frags; |
---|
1468 | 1945 | u16 pktlen, pktcnt, seqno = l->snd_nxt; |
---|
| 1946 | + bool pktcnt_need_update = false; |
---|
| 1947 | + u16 syncpt; |
---|
| 1948 | + int rc; |
---|
1469 | 1949 | |
---|
1470 | 1950 | if (!tnl) |
---|
1471 | 1951 | return; |
---|
1472 | 1952 | |
---|
1473 | 1953 | __skb_queue_head_init(&tnlq); |
---|
1474 | | - __skb_queue_head_init(&tmpxq); |
---|
| 1954 | + /* Link Synching: |
---|
| 1955 | + * From now on, send only a single ("dummy") SYNCH message |
---|
| 1956 | + * to the peer. It carries no data, just a header conveying |
---|
| 1957 | + * the synch point to the peer. |
---|
| 1958 | + */ |
---|
| 1959 | + if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) { |
---|
| 1960 | + tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG, |
---|
| 1961 | + INT_H_SIZE, 0, l->addr, |
---|
| 1962 | + tipc_own_addr(l->net), |
---|
| 1963 | + 0, 0, 0); |
---|
| 1964 | + if (!tnlskb) { |
---|
| 1965 | + pr_warn("%sunable to create dummy SYNCH_MSG\n", |
---|
| 1966 | + link_co_err); |
---|
| 1967 | + return; |
---|
| 1968 | + } |
---|
1475 | 1969 | |
---|
| 1970 | + hdr = buf_msg(tnlskb); |
---|
| 1971 | + syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1; |
---|
| 1972 | + msg_set_syncpt(hdr, syncpt); |
---|
| 1973 | + msg_set_bearer_id(hdr, l->peer_bearer_id); |
---|
| 1974 | + __skb_queue_tail(&tnlq, tnlskb); |
---|
| 1975 | + tipc_link_xmit(tnl, &tnlq, xmitq); |
---|
| 1976 | + return; |
---|
| 1977 | + } |
---|
| 1978 | + |
---|
| 1979 | + __skb_queue_head_init(&tmpxq); |
---|
| 1980 | + __skb_queue_head_init(&frags); |
---|
1476 | 1981 | /* At least one packet required for safe algorithm => add dummy */ |
---|
1477 | 1982 | skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG, |
---|
1478 | 1983 | BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net), |
---|
.. | .. |
---|
1488 | 1993 | /* Initialize reusable tunnel packet header */ |
---|
1489 | 1994 | tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL, |
---|
1490 | 1995 | mtyp, INT_H_SIZE, l->addr); |
---|
1491 | | - pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq); |
---|
| 1996 | + if (mtyp == SYNCH_MSG) |
---|
| 1997 | + pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq)); |
---|
| 1998 | + else |
---|
| 1999 | + pktcnt = skb_queue_len(&l->transmq); |
---|
| 2000 | + pktcnt += skb_queue_len(&l->backlogq); |
---|
1492 | 2001 | msg_set_msgcnt(&tnlhdr, pktcnt); |
---|
1493 | 2002 | msg_set_bearer_id(&tnlhdr, l->peer_bearer_id); |
---|
1494 | 2003 | tnl: |
---|
.. | .. |
---|
1498 | 2007 | if (queue == &l->backlogq) |
---|
1499 | 2008 | msg_set_seqno(hdr, seqno++); |
---|
1500 | 2009 | pktlen = msg_size(hdr); |
---|
| 2010 | + |
---|
| 2011 | + /* The packet does not fit the tunnel link MTU. This can |
---|
| 2012 | + * be due to: |
---|
| 2013 | + * 1) The link MTU has just changed or was set differently; |
---|
| 2014 | + * 2) A FAILOVER sent on top of a SYNCH message |
---|
| 2015 | + * |
---|
| 2016 | + * The 2nd case should not happen if the peer supports |
---|
| 2017 | + * TIPC_TUNNEL_ENHANCED |
---|
| 2018 | + */ |
---|
| 2019 | + if (pktlen > tnl->mtu - INT_H_SIZE) { |
---|
| 2020 | + if (mtyp == FAILOVER_MSG && |
---|
| 2021 | + (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) { |
---|
| 2022 | + rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu, |
---|
| 2023 | + &frags); |
---|
| 2024 | + if (rc) { |
---|
| 2025 | + pr_warn("%sunable to frag msg: rc %d\n", |
---|
| 2026 | + link_co_err, rc); |
---|
| 2027 | + return; |
---|
| 2028 | + } |
---|
| 2029 | + pktcnt += skb_queue_len(&frags) - 1; |
---|
| 2030 | + pktcnt_need_update = true; |
---|
| 2031 | + skb_queue_splice_tail_init(&frags, &tnlq); |
---|
| 2032 | + continue; |
---|
| 2033 | + } |
---|
| 2034 | + /* The peer does not support TIPC_TUNNEL_ENHANCED, |
---|
| 2035 | + * so just warn and return. |
---|
| 2036 | + */ |
---|
| 2037 | + pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n", |
---|
| 2038 | + link_co_err, msg_user(hdr), |
---|
| 2039 | + msg_type(hdr), msg_size(hdr)); |
---|
| 2040 | + return; |
---|
| 2041 | + } |
---|
| 2042 | + |
---|
1501 | 2043 | msg_set_size(&tnlhdr, pktlen + INT_H_SIZE); |
---|
1502 | 2044 | tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC); |
---|
1503 | 2045 | if (!tnlskb) { |
---|
.. | .. |
---|
1513 | 2055 | goto tnl; |
---|
1514 | 2056 | } |
---|
1515 | 2057 | |
---|
| 2058 | + if (pktcnt_need_update) |
---|
| 2059 | + skb_queue_walk(&tnlq, skb) { |
---|
| 2060 | + hdr = buf_msg(skb); |
---|
| 2061 | + msg_set_msgcnt(hdr, pktcnt); |
---|
| 2062 | + } |
---|
| 2063 | + |
---|
1516 | 2064 | tipc_link_xmit(tnl, &tnlq, xmitq); |
---|
1517 | 2065 | |
---|
1518 | 2066 | if (mtyp == FAILOVER_MSG) { |
---|
1519 | 2067 | tnl->drop_point = l->rcv_nxt; |
---|
1520 | 2068 | tnl->failover_reasm_skb = l->reasm_buf; |
---|
1521 | 2069 | l->reasm_buf = NULL; |
---|
| 2070 | + |
---|
| 2071 | + /* Failover the link's deferdq */ |
---|
| 2072 | + if (unlikely(!skb_queue_empty(fdefq))) { |
---|
| 2073 | + pr_warn("Link failover deferdq not empty: %d!\n", |
---|
| 2074 | + skb_queue_len(fdefq)); |
---|
| 2075 | + __skb_queue_purge(fdefq); |
---|
| 2076 | + } |
---|
| 2077 | + skb_queue_splice_init(&l->deferdq, fdefq); |
---|
| 2078 | + } |
---|
| 2079 | +} |
---|
| 2080 | + |
---|
| 2081 | +/** |
---|
| 2082 | + * tipc_link_failover_prepare() - prepare tnl for link failover |
---|
| 2083 | + * |
---|
| 2084 | + * This is a special version of its precursor, tipc_link_tnl_prepare(); |
---|
| 2085 | + * see tipc_node_link_failover() for details |
---|
| 2086 | + * |
---|
| 2087 | + * @l: failover link |
---|
| 2088 | + * @tnl: tunnel link |
---|
| 2089 | + * @xmitq: queue for messages to be transmitted |
---|
| 2090 | + */ |
---|
| 2091 | +void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl, |
---|
| 2092 | + struct sk_buff_head *xmitq) |
---|
| 2093 | +{ |
---|
| 2094 | + struct sk_buff_head *fdefq = &tnl->failover_deferdq; |
---|
| 2095 | + |
---|
| 2096 | + tipc_link_create_dummy_tnl_msg(tnl, xmitq); |
---|
| 2097 | + |
---|
| 2098 | + /* This failover link endpoint was never established before, |
---|
| 2099 | + * so it has not received anything from the peer. |
---|
| 2100 | + * Otherwise, it must be a normal failover situation or the |
---|
| 2101 | + * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes |
---|
| 2102 | + * would have to start over from scratch instead. |
---|
| 2103 | + */ |
---|
| 2104 | + tnl->drop_point = 1; |
---|
| 2105 | + tnl->failover_reasm_skb = NULL; |
---|
| 2106 | + |
---|
| 2107 | + /* Initialize the link's failover deferdq */ |
---|
| 2108 | + if (unlikely(!skb_queue_empty(fdefq))) { |
---|
| 2109 | + pr_warn("Link failover deferdq not empty: %d!\n", |
---|
| 2110 | + skb_queue_len(fdefq)); |
---|
| 2111 | + __skb_queue_purge(fdefq); |
---|
1522 | 2112 | } |
---|
1523 | 2113 | } |
---|
1524 | 2114 | |
---|
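Two decisions in the tunnelling code above are worth spelling out: the dummy SYNCH message advertises a synch point covering both the transmit and backlog queues (l->snd_nxt plus the backlog length, minus one), and an oversized FAILOVER packet is fragmented only when the peer supports TIPC_TUNNEL_ENHANCED, otherwise the attempt is abandoned with a warning. A hedged sketch of both decisions, with illustrative helper names:

```c
/* Sketch of the two tunnelling decisions made above; helper names
 * and standalone types are illustrative, not the kernel API.
 */
#include <stdbool.h>
#include <stdint.h>

#define INT_H_SIZE 40	/* tunnel header prepended to each packet */

/* Last seqno the peer must receive before leaving SYNCH state */
static uint16_t synch_point(uint16_t snd_nxt, uint16_t backlog_len)
{
	return snd_nxt + backlog_len - 1;
}

/* Decide whether an oversized packet can be tunnelled at all */
static bool must_fragment(uint16_t pktlen, uint16_t tnl_mtu,
			  bool peer_tunnel_enhanced)
{
	if (pktlen <= tnl_mtu - INT_H_SIZE)
		return false;		/* fits as a single tunnel packet */
	return peer_tunnel_enhanced;	/* else fragment, or give up */
}
```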
.. | .. |
---|
1572 | 2162 | struct sk_buff_head *xmitq) |
---|
1573 | 2163 | { |
---|
1574 | 2164 | struct tipc_msg *hdr = buf_msg(skb); |
---|
1575 | | - u16 rcvgap = 0; |
---|
1576 | | - u16 ack = msg_ack(hdr); |
---|
1577 | | - u16 gap = msg_seq_gap(hdr); |
---|
| 2165 | + struct tipc_gap_ack_blks *ga = NULL; |
---|
| 2166 | + bool reply = msg_probe(hdr), retransmitted = false; |
---|
| 2167 | + u32 dlen = msg_data_sz(hdr), glen = 0; |
---|
1578 | 2168 | u16 peers_snd_nxt = msg_next_sent(hdr); |
---|
1579 | 2169 | u16 peers_tol = msg_link_tolerance(hdr); |
---|
1580 | 2170 | u16 peers_prio = msg_linkprio(hdr); |
---|
| 2171 | + u16 gap = msg_seq_gap(hdr); |
---|
| 2172 | + u16 ack = msg_ack(hdr); |
---|
1581 | 2173 | u16 rcv_nxt = l->rcv_nxt; |
---|
1582 | | - u32 dlen = msg_data_sz(hdr); |
---|
| 2174 | + u16 rcvgap = 0; |
---|
1583 | 2175 | int mtyp = msg_type(hdr); |
---|
1584 | | - bool reply = msg_probe(hdr); |
---|
1585 | | - void *data; |
---|
| 2176 | + int rc = 0, released; |
---|
1586 | 2177 | char *if_name; |
---|
1587 | | - int rc = 0; |
---|
| 2178 | + void *data; |
---|
| 2179 | + |
---|
| 2180 | + trace_tipc_proto_rcv(skb, false, l->name); |
---|
1588 | 2181 | |
---|
1589 | 2182 | if (dlen > U16_MAX) |
---|
1590 | 2183 | goto exit; |
---|
.. | .. |
---|
1595 | 2188 | if (tipc_own_addr(l->net) > msg_prevnode(hdr)) |
---|
1596 | 2189 | l->net_plane = msg_net_plane(hdr); |
---|
1597 | 2190 | |
---|
1598 | | - skb_linearize(skb); |
---|
| 2191 | + if (skb_linearize(skb)) |
---|
| 2192 | + goto exit; |
---|
| 2193 | + |
---|
1599 | 2194 | hdr = buf_msg(skb); |
---|
1600 | 2195 | data = msg_data(hdr); |
---|
1601 | 2196 | |
---|
1602 | | - if (!tipc_link_validate_msg(l, hdr)) |
---|
| 2197 | + if (!tipc_link_validate_msg(l, hdr)) { |
---|
| 2198 | + trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!"); |
---|
| 2199 | + trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!"); |
---|
1603 | 2200 | goto exit; |
---|
| 2201 | + } |
---|
1604 | 2202 | |
---|
1605 | 2203 | switch (mtyp) { |
---|
1606 | 2204 | case RESET_MSG: |
---|
.. | .. |
---|
1627 | 2225 | rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); |
---|
1628 | 2226 | break; |
---|
1629 | 2227 | } |
---|
| 2228 | + |
---|
| 2229 | + /* If this endpoint was re-created while peer was ESTABLISHING |
---|
| 2230 | + * it doesn't know current session number. Force re-synch. |
---|
| 2231 | + */ |
---|
| 2232 | + if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) && |
---|
| 2233 | + l->session != msg_dest_session(hdr)) { |
---|
| 2234 | + if (less(l->session, msg_dest_session(hdr))) |
---|
| 2235 | + l->session = msg_dest_session(hdr) + 1; |
---|
| 2236 | + break; |
---|
| 2237 | + } |
---|
| 2238 | + |
---|
1630 | 2239 | /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ |
---|
1631 | 2240 | if (mtyp == RESET_MSG || !link_is_up(l)) |
---|
1632 | 2241 | rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); |
---|
.. | .. |
---|
1643 | 2252 | break; |
---|
1644 | 2253 | |
---|
1645 | 2254 | case STATE_MSG: |
---|
| 2255 | + /* Validate Gap ACK blocks, drop if invalid */ |
---|
| 2256 | + glen = tipc_get_gap_ack_blks(&ga, l, hdr, true); |
---|
| 2257 | + if (glen > dlen) |
---|
| 2258 | + break; |
---|
| 2259 | + |
---|
1646 | 2260 | l->rcv_nxt_state = msg_seqno(hdr) + 1; |
---|
1647 | 2261 | |
---|
1648 | 2262 | /* Update own tolerance if peer indicates a non-zero value */ |
---|
.. | .. |
---|
1667 | 2281 | rc = TIPC_LINK_UP_EVT; |
---|
1668 | 2282 | break; |
---|
1669 | 2283 | } |
---|
1670 | | - tipc_mon_rcv(l->net, data, dlen, l->addr, |
---|
| 2284 | + |
---|
| 2285 | + tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr, |
---|
1671 | 2286 | &l->mon_state, l->bearer_id); |
---|
1672 | 2287 | |
---|
1673 | 2288 | /* Send NACK if peer has sent pkts we haven't received yet */ |
---|
1674 | | - if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l)) |
---|
| 2289 | + if ((reply || msg_is_keepalive(hdr)) && |
---|
| 2290 | + more(peers_snd_nxt, rcv_nxt) && |
---|
| 2291 | + !tipc_link_is_synching(l) && |
---|
| 2292 | + skb_queue_empty(&l->deferdq)) |
---|
1675 | 2293 | rcvgap = peers_snd_nxt - l->rcv_nxt; |
---|
1676 | 2294 | if (rcvgap || reply) |
---|
1677 | 2295 | tipc_link_build_proto_msg(l, STATE_MSG, 0, reply, |
---|
1678 | 2296 | rcvgap, 0, 0, xmitq); |
---|
1679 | | - tipc_link_release_pkts(l, ack); |
---|
1680 | 2297 | |
---|
1681 | | - /* If NACK, retransmit will now start at right position */ |
---|
1682 | | - if (gap) { |
---|
1683 | | - rc = tipc_link_retrans(l, l, ack + 1, ack + gap, xmitq); |
---|
| 2298 | + released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq, |
---|
| 2299 | + &retransmitted, &rc); |
---|
| 2300 | + if (gap) |
---|
1684 | 2301 | l->stats.recv_nacks++; |
---|
1685 | | - } |
---|
1686 | | - |
---|
1687 | | - tipc_link_advance_backlog(l, xmitq); |
---|
| 2302 | + if (released || retransmitted) |
---|
| 2303 | + tipc_link_update_cwin(l, released, retransmitted); |
---|
| 2304 | + if (released) |
---|
| 2305 | + tipc_link_advance_backlog(l, xmitq); |
---|
1688 | 2306 | if (unlikely(!skb_queue_empty(&l->wakeupq))) |
---|
1689 | 2307 | link_prepare_wakeup(l); |
---|
1690 | 2308 | } |
---|
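The STATE_MSG handling above tightens the condition for reporting a receive gap back to the peer: only on a probe or keepalive, only while the link is not synching, and only if nothing is already waiting in the defer queue. A compact sketch of that condition; the helper is hypothetical, with the same wraparound-safe u16 comparison the kernel's more() performs:

```c
/* Sketch of the tightened NACK condition in the STATE_MSG branch
 * above. Not the kernel function, just the decision logic.
 */
#include <stdbool.h>
#include <stdint.h>

static uint16_t state_msg_rcvgap(bool probe_or_keepalive,
				 bool link_synching, bool deferdq_empty,
				 uint16_t peers_snd_nxt, uint16_t rcv_nxt)
{
	/* more(peers_snd_nxt, rcv_nxt) with u16 wraparound semantics */
	bool peer_ahead = (int16_t)(peers_snd_nxt - rcv_nxt) > 0;

	if (probe_or_keepalive && peer_ahead &&
	    !link_synching && deferdq_empty)
		return peers_snd_nxt - rcv_nxt;
	return 0;	/* no gap reported; deferred pkts resolve it locally */
}
```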
.. | .. |
---|
1765 | 2383 | l->rcv_nxt = peers_snd_nxt; |
---|
1766 | 2384 | } |
---|
1767 | 2385 | |
---|
1768 | | -/* link_bc_retr_eval()- check if the indicated range can be retransmitted now |
---|
1769 | | - * - Adjust permitted range if there is overlap with previous retransmission |
---|
1770 | | - */ |
---|
1771 | | -static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to) |
---|
1772 | | -{ |
---|
1773 | | - unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr); |
---|
1774 | | - |
---|
1775 | | - if (less(*to, *from)) |
---|
1776 | | - return false; |
---|
1777 | | - |
---|
1778 | | - /* New retransmission request */ |
---|
1779 | | - if ((elapsed > TIPC_BC_RETR_LIMIT) || |
---|
1780 | | - less(*to, l->prev_from) || more(*from, l->prev_to)) { |
---|
1781 | | - l->prev_from = *from; |
---|
1782 | | - l->prev_to = *to; |
---|
1783 | | - l->prev_retr = jiffies; |
---|
1784 | | - return true; |
---|
1785 | | - } |
---|
1786 | | - |
---|
1787 | | - /* Inside range of previous retransmit */ |
---|
1788 | | - if (!less(*from, l->prev_from) && !more(*to, l->prev_to)) |
---|
1789 | | - return false; |
---|
1790 | | - |
---|
1791 | | - /* Fully or partially outside previous range => exclude overlap */ |
---|
1792 | | - if (less(*from, l->prev_from)) { |
---|
1793 | | - *to = l->prev_from - 1; |
---|
1794 | | - l->prev_from = *from; |
---|
1795 | | - } |
---|
1796 | | - if (more(*to, l->prev_to)) { |
---|
1797 | | - *from = l->prev_to + 1; |
---|
1798 | | - l->prev_to = *to; |
---|
1799 | | - } |
---|
1800 | | - l->prev_retr = jiffies; |
---|
1801 | | - return true; |
---|
1802 | | -} |
---|
1803 | | - |
---|
1804 | 2386 | /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state |
---|
1805 | 2387 | */ |
---|
1806 | 2388 | int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr, |
---|
1807 | 2389 | struct sk_buff_head *xmitq) |
---|
1808 | 2390 | { |
---|
1809 | | - struct tipc_link *snd_l = l->bc_sndlink; |
---|
1810 | 2391 | u16 peers_snd_nxt = msg_bc_snd_nxt(hdr); |
---|
1811 | | - u16 from = msg_bcast_ack(hdr) + 1; |
---|
1812 | | - u16 to = from + msg_bc_gap(hdr) - 1; |
---|
1813 | 2392 | int rc = 0; |
---|
1814 | 2393 | |
---|
1815 | 2394 | if (!link_is_up(l)) |
---|
.. | .. |
---|
1825 | 2404 | if (!l->bc_peer_is_up) |
---|
1826 | 2405 | return rc; |
---|
1827 | 2406 | |
---|
1828 | | - l->stats.recv_nacks++; |
---|
1829 | | - |
---|
1830 | 2407 | /* Ignore if peers_snd_nxt goes beyond receive window */ |
---|
1831 | 2408 | if (more(peers_snd_nxt, l->rcv_nxt + l->window)) |
---|
1832 | 2409 | return rc; |
---|
1833 | | - |
---|
1834 | | - if (link_bc_retr_eval(snd_l, &from, &to)) |
---|
1835 | | - rc = tipc_link_retrans(snd_l, l, from, to, xmitq); |
---|
1836 | 2410 | |
---|
1837 | 2411 | l->snd_nxt = peers_snd_nxt; |
---|
1838 | 2412 | if (link_bc_rcv_gap(l)) |
---|
.. | .. |
---|
1868 | 2442 | return 0; |
---|
1869 | 2443 | } |
---|
1870 | 2444 | |
---|
1871 | | -void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked, |
---|
1872 | | - struct sk_buff_head *xmitq) |
---|
| 2445 | +int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap, |
---|
| 2446 | + struct tipc_gap_ack_blks *ga, |
---|
| 2447 | + struct sk_buff_head *xmitq, |
---|
| 2448 | + struct sk_buff_head *retrq) |
---|
1873 | 2449 | { |
---|
1874 | | - struct sk_buff *skb, *tmp; |
---|
1875 | | - struct tipc_link *snd_l = l->bc_sndlink; |
---|
| 2450 | + struct tipc_link *l = r->bc_sndlink; |
---|
| 2451 | + bool unused = false; |
---|
| 2452 | + int rc = 0; |
---|
1876 | 2453 | |
---|
1877 | | - if (!link_is_up(l) || !l->bc_peer_is_up) |
---|
1878 | | - return; |
---|
| 2454 | + if (!link_is_up(r) || !r->bc_peer_is_up) |
---|
| 2455 | + return 0; |
---|
1879 | 2456 | |
---|
1880 | | - if (!more(acked, l->acked)) |
---|
1881 | | - return; |
---|
1882 | | - |
---|
1883 | | - /* Skip over packets peer has already acked */ |
---|
1884 | | - skb_queue_walk(&snd_l->transmq, skb) { |
---|
1885 | | - if (more(buf_seqno(skb), l->acked)) |
---|
1886 | | - break; |
---|
| 2457 | + if (gap) { |
---|
| 2458 | + l->stats.recv_nacks++; |
---|
| 2459 | + r->stats.recv_nacks++; |
---|
1887 | 2460 | } |
---|
1888 | 2461 | |
---|
1889 | | - /* Update/release the packets peer is acking now */ |
---|
1890 | | - skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) { |
---|
1891 | | - if (more(buf_seqno(skb), acked)) |
---|
1892 | | - break; |
---|
1893 | | - if (!--TIPC_SKB_CB(skb)->ackers) { |
---|
1894 | | - __skb_unlink(skb, &snd_l->transmq); |
---|
1895 | | - kfree_skb(skb); |
---|
1896 | | - } |
---|
1897 | | - } |
---|
1898 | | - l->acked = acked; |
---|
1899 | | - tipc_link_advance_backlog(snd_l, xmitq); |
---|
1900 | | - if (unlikely(!skb_queue_empty(&snd_l->wakeupq))) |
---|
1901 | | - link_prepare_wakeup(snd_l); |
---|
| 2462 | + if (less(acked, r->acked) || (acked == r->acked && !gap && !ga)) |
---|
| 2463 | + return 0; |
---|
| 2464 | + |
---|
| 2465 | + trace_tipc_link_bc_ack(r, acked, gap, &l->transmq); |
---|
| 2466 | + tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc); |
---|
| 2467 | + |
---|
| 2468 | + tipc_link_advance_backlog(l, xmitq); |
---|
| 2469 | + if (unlikely(!skb_queue_empty(&l->wakeupq))) |
---|
| 2470 | + link_prepare_wakeup(l); |
---|
| 2471 | + |
---|
| 2472 | + return rc; |
---|
1902 | 2473 | } |
---|
1903 | 2474 | |
---|
1904 | 2475 | /* tipc_link_bc_nack_rcv(): receive broadcast nack message |
---|
.. | .. |
---|
1926 | 2497 | return 0; |
---|
1927 | 2498 | |
---|
1928 | 2499 | if (dnode == tipc_own_addr(l->net)) { |
---|
1929 | | - tipc_link_bc_ack_rcv(l, acked, xmitq); |
---|
1930 | | - rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq); |
---|
| 2500 | + rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq, |
---|
| 2501 | + xmitq); |
---|
1931 | 2502 | l->stats.recv_nacks++; |
---|
1932 | 2503 | return rc; |
---|
1933 | 2504 | } |
---|
.. | .. |
---|
1939 | 2510 | return 0; |
---|
1940 | 2511 | } |
---|
1941 | 2512 | |
---|
1942 | | -void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) |
---|
| 2513 | +void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win) |
---|
1943 | 2514 | { |
---|
1944 | 2515 | int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE); |
---|
1945 | 2516 | |
---|
1946 | | - l->window = win; |
---|
1947 | | - l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win); |
---|
1948 | | - l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2); |
---|
1949 | | - l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3); |
---|
1950 | | - l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4); |
---|
| 2517 | + l->min_win = min_win; |
---|
| 2518 | + l->ssthresh = max_win; |
---|
| 2519 | + l->max_win = max_win; |
---|
| 2520 | + l->window = min_win; |
---|
| 2521 | + l->backlog[TIPC_LOW_IMPORTANCE].limit = min_win * 2; |
---|
| 2522 | + l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = min_win * 4; |
---|
| 2523 | + l->backlog[TIPC_HIGH_IMPORTANCE].limit = min_win * 6; |
---|
| 2524 | + l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8; |
---|
1951 | 2525 | l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk; |
---|
1952 | 2526 | } |
---|
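With this change the per-importance backlog limits scale with the configured minimum window instead of the old fixed floors, while the system-importance limit still follows max_bulk. A quick worked example, using 50 as an illustrative minimum window value:

```c
/* Worked example of the new backlog limits (illustrative only):
 * each importance level gets a limit proportional to min_win.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t min_win = 50;	/* illustrative minimum link window */
	uint16_t limits[] = {
		(uint16_t)(min_win * 2),	/* LOW       -> 100 */
		(uint16_t)(min_win * 4),	/* MEDIUM    -> 200 */
		(uint16_t)(min_win * 6),	/* HIGH      -> 300 */
		(uint16_t)(min_win * 8),	/* CRITICAL  -> 400 */
	};

	for (int i = 0; i < 4; i++)
		printf("importance %d: backlog limit %u\n", i, limits[i]);
	return 0;
}
```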
1953 | 2527 | |
---|
.. | .. |
---|
1978 | 2552 | { |
---|
1979 | 2553 | int err; |
---|
1980 | 2554 | |
---|
1981 | | - err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop, |
---|
1982 | | - tipc_nl_prop_policy, NULL); |
---|
| 2555 | + err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop, |
---|
| 2556 | + tipc_nl_prop_policy, NULL); |
---|
1983 | 2557 | if (err) |
---|
1984 | 2558 | return err; |
---|
1985 | 2559 | |
---|
.. | .. |
---|
2000 | 2574 | } |
---|
2001 | 2575 | |
---|
2002 | 2576 | if (props[TIPC_NLA_PROP_WIN]) { |
---|
2003 | | - u32 win; |
---|
| 2577 | + u32 max_win; |
---|
2004 | 2578 | |
---|
2005 | | - win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); |
---|
2006 | | - if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN)) |
---|
| 2579 | + max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); |
---|
| 2580 | + if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN) |
---|
2007 | 2581 | return -EINVAL; |
---|
2008 | 2582 | } |
---|
2009 | 2583 | |
---|
.. | .. |
---|
2058 | 2632 | (s->accu_queue_sz / s->queue_sz_counts) : 0} |
---|
2059 | 2633 | }; |
---|
2060 | 2634 | |
---|
2061 | | - stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS); |
---|
| 2635 | + stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS); |
---|
2062 | 2636 | if (!stats) |
---|
2063 | 2637 | return -EMSGSIZE; |
---|
2064 | 2638 | |
---|
.. | .. |
---|
2090 | 2664 | if (!hdr) |
---|
2091 | 2665 | return -EMSGSIZE; |
---|
2092 | 2666 | |
---|
2093 | | - attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); |
---|
| 2667 | + attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK); |
---|
2094 | 2668 | if (!attrs) |
---|
2095 | 2669 | goto msg_full; |
---|
2096 | 2670 | |
---|
.. | .. |
---|
2112 | 2686 | if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE)) |
---|
2113 | 2687 | goto attr_msg_full; |
---|
2114 | 2688 | |
---|
2115 | | - prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); |
---|
| 2689 | + prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP); |
---|
2116 | 2690 | if (!prop) |
---|
2117 | 2691 | goto attr_msg_full; |
---|
2118 | 2692 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) |
---|
.. | .. |
---|
2179 | 2753 | (stats->accu_queue_sz / stats->queue_sz_counts) : 0} |
---|
2180 | 2754 | }; |
---|
2181 | 2755 | |
---|
2182 | | - nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS); |
---|
| 2756 | + nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS); |
---|
2183 | 2757 | if (!nest) |
---|
2184 | 2758 | return -EMSGSIZE; |
---|
2185 | 2759 | |
---|
.. | .. |
---|
2196 | 2770 | return -EMSGSIZE; |
---|
2197 | 2771 | } |
---|
2198 | 2772 | |
---|
2199 | | -int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg) |
---|
| 2773 | +int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg, |
---|
| 2774 | + struct tipc_link *bcl) |
---|
2200 | 2775 | { |
---|
2201 | 2776 | int err; |
---|
2202 | 2777 | void *hdr; |
---|
2203 | 2778 | struct nlattr *attrs; |
---|
2204 | 2779 | struct nlattr *prop; |
---|
2205 | | - struct tipc_net *tn = net_generic(net, tipc_net_id); |
---|
2206 | | - struct tipc_link *bcl = tn->bcl; |
---|
| 2780 | + u32 bc_mode = tipc_bcast_get_mode(net); |
---|
| 2781 | + u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net); |
---|
2207 | 2782 | |
---|
2208 | 2783 | if (!bcl) |
---|
2209 | 2784 | return 0; |
---|
.. | .. |
---|
2217 | 2792 | return -EMSGSIZE; |
---|
2218 | 2793 | } |
---|
2219 | 2794 | |
---|
2220 | | - attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); |
---|
| 2795 | + attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK); |
---|
2221 | 2796 | if (!attrs) |
---|
2222 | 2797 | goto msg_full; |
---|
2223 | 2798 | |
---|
.. | .. |
---|
2234 | 2809 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0)) |
---|
2235 | 2810 | goto attr_msg_full; |
---|
2236 | 2811 | |
---|
2237 | | - prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); |
---|
| 2812 | + prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP); |
---|
2238 | 2813 | if (!prop) |
---|
2239 | 2814 | goto attr_msg_full; |
---|
2240 | | - if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window)) |
---|
| 2815 | + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win)) |
---|
2241 | 2816 | goto prop_msg_full; |
---|
| 2817 | + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode)) |
---|
| 2818 | + goto prop_msg_full; |
---|
| 2819 | + if (bc_mode & BCLINK_MODE_SEL) |
---|
| 2820 | + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO, |
---|
| 2821 | + bc_ratio)) |
---|
| 2822 | + goto prop_msg_full; |
---|
2242 | 2823 | nla_nest_end(msg->skb, prop); |
---|
2243 | 2824 | |
---|
2244 | 2825 | err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats); |
---|
.. | .. |
---|
2283 | 2864 | { |
---|
2284 | 2865 | l->abort_limit = limit; |
---|
2285 | 2866 | } |
---|
| 2867 | + |
---|
| 2868 | +/** |
---|
| 2869 | + * tipc_link_dump - dump TIPC link data |
---|
| 2870 | + * @l: tipc link to be dumped |
---|
| 2871 | + * @dqueues: bitmask selecting which link queues to dump: |
---|
| 2872 | + * - TIPC_DUMP_NONE: don't dump link queues |
---|
| 2873 | + * - TIPC_DUMP_TRANSMQ: dump link transmq queue |
---|
| 2874 | + * - TIPC_DUMP_BACKLOGQ: dump link backlog queue |
---|
| 2875 | + * - TIPC_DUMP_DEFERDQ: dump link deferred queue |
---|
| 2876 | + * - TIPC_DUMP_INPUTQ: dump link input queue |
---|
| 2877 | + * - TIPC_DUMP_WAKEUP: dump link wakeup queue |
---|
| 2878 | + * - TIPC_DUMP_ALL: dump all the link queues above |
---|
| 2879 | + * @buf: buffer where the formatted dump data is returned |
---|
| 2880 | + */ |
---|
| 2881 | +int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf) |
---|
| 2882 | +{ |
---|
| 2883 | + int i = 0; |
---|
| 2884 | + size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN; |
---|
| 2885 | + struct sk_buff_head *list; |
---|
| 2886 | + struct sk_buff *hskb, *tskb; |
---|
| 2887 | + u32 len; |
---|
| 2888 | + |
---|
| 2889 | + if (!l) { |
---|
| 2890 | + i += scnprintf(buf, sz, "link data: (null)\n"); |
---|
| 2891 | + return i; |
---|
| 2892 | + } |
---|
| 2893 | + |
---|
| 2894 | + i += scnprintf(buf, sz, "link data: %x", l->addr); |
---|
| 2895 | + i += scnprintf(buf + i, sz - i, " %x", l->state); |
---|
| 2896 | + i += scnprintf(buf + i, sz - i, " %u", l->in_session); |
---|
| 2897 | + i += scnprintf(buf + i, sz - i, " %u", l->session); |
---|
| 2898 | + i += scnprintf(buf + i, sz - i, " %u", l->peer_session); |
---|
| 2899 | + i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt); |
---|
| 2900 | + i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt); |
---|
| 2901 | + i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state); |
---|
| 2902 | + i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state); |
---|
| 2903 | + i += scnprintf(buf + i, sz - i, " %x", l->peer_caps); |
---|
| 2904 | + i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt); |
---|
| 2905 | + i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt); |
---|
| 2906 | + i += scnprintf(buf + i, sz - i, " %u", 0); |
---|
| 2907 | + i += scnprintf(buf + i, sz - i, " %u", 0); |
---|
| 2908 | + i += scnprintf(buf + i, sz - i, " %u", l->acked); |
---|
| 2909 | + |
---|
| 2910 | + list = &l->transmq; |
---|
| 2911 | + len = skb_queue_len(list); |
---|
| 2912 | + hskb = skb_peek(list); |
---|
| 2913 | + tskb = skb_peek_tail(list); |
---|
| 2914 | + i += scnprintf(buf + i, sz - i, " | %u %u %u", len, |
---|
| 2915 | + (hskb) ? msg_seqno(buf_msg(hskb)) : 0, |
---|
| 2916 | + (tskb) ? msg_seqno(buf_msg(tskb)) : 0); |
---|
| 2917 | + |
---|
| 2918 | + list = &l->deferdq; |
---|
| 2919 | + len = skb_queue_len(list); |
---|
| 2920 | + hskb = skb_peek(list); |
---|
| 2921 | + tskb = skb_peek_tail(list); |
---|
| 2922 | + i += scnprintf(buf + i, sz - i, " | %u %u %u", len, |
---|
| 2923 | + (hskb) ? msg_seqno(buf_msg(hskb)) : 0, |
---|
| 2924 | + (tskb) ? msg_seqno(buf_msg(tskb)) : 0); |
---|
| 2925 | + |
---|
| 2926 | + list = &l->backlogq; |
---|
| 2927 | + len = skb_queue_len(list); |
---|
| 2928 | + hskb = skb_peek(list); |
---|
| 2929 | + tskb = skb_peek_tail(list); |
---|
| 2930 | + i += scnprintf(buf + i, sz - i, " | %u %u %u", len, |
---|
| 2931 | + (hskb) ? msg_seqno(buf_msg(hskb)) : 0, |
---|
| 2932 | + (tskb) ? msg_seqno(buf_msg(tskb)) : 0); |
---|
| 2933 | + |
---|
| 2934 | + list = l->inputq; |
---|
| 2935 | + len = skb_queue_len(list); |
---|
| 2936 | + hskb = skb_peek(list); |
---|
| 2937 | + tskb = skb_peek_tail(list); |
---|
| 2938 | + i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len, |
---|
| 2939 | + (hskb) ? msg_seqno(buf_msg(hskb)) : 0, |
---|
| 2940 | + (tskb) ? msg_seqno(buf_msg(tskb)) : 0); |
---|
| 2941 | + |
---|
| 2942 | + if (dqueues & TIPC_DUMP_TRANSMQ) { |
---|
| 2943 | + i += scnprintf(buf + i, sz - i, "transmq: "); |
---|
| 2944 | + i += tipc_list_dump(&l->transmq, false, buf + i); |
---|
| 2945 | + } |
---|
| 2946 | + if (dqueues & TIPC_DUMP_BACKLOGQ) { |
---|
| 2947 | + i += scnprintf(buf + i, sz - i, |
---|
| 2948 | + "backlogq: <%u %u %u %u %u>, ", |
---|
| 2949 | + l->backlog[TIPC_LOW_IMPORTANCE].len, |
---|
| 2950 | + l->backlog[TIPC_MEDIUM_IMPORTANCE].len, |
---|
| 2951 | + l->backlog[TIPC_HIGH_IMPORTANCE].len, |
---|
| 2952 | + l->backlog[TIPC_CRITICAL_IMPORTANCE].len, |
---|
| 2953 | + l->backlog[TIPC_SYSTEM_IMPORTANCE].len); |
---|
| 2954 | + i += tipc_list_dump(&l->backlogq, false, buf + i); |
---|
| 2955 | + } |
---|
| 2956 | + if (dqueues & TIPC_DUMP_DEFERDQ) { |
---|
| 2957 | + i += scnprintf(buf + i, sz - i, "deferdq: "); |
---|
| 2958 | + i += tipc_list_dump(&l->deferdq, false, buf + i); |
---|
| 2959 | + } |
---|
| 2960 | + if (dqueues & TIPC_DUMP_INPUTQ) { |
---|
| 2961 | + i += scnprintf(buf + i, sz - i, "inputq: "); |
---|
| 2962 | + i += tipc_list_dump(l->inputq, false, buf + i); |
---|
| 2963 | + } |
---|
| 2964 | + if (dqueues & TIPC_DUMP_WAKEUP) { |
---|
| 2965 | + i += scnprintf(buf + i, sz - i, "wakeup: "); |
---|
| 2966 | + i += tipc_list_dump(&l->wakeupq, false, buf + i); |
---|
| 2967 | + } |
---|
| 2968 | + |
---|
| 2969 | + return i; |
---|
| 2970 | +} |
---|
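A possible caller of the new dump helper, assuming a buffer sized by the LINK_LMAX constant the function itself uses and one of the TIPC_DUMP_* flags listed in its kernel-doc; the surrounding function is purely illustrative, not part of the patch:

```c
/* Illustrative debug caller of tipc_link_dump(); LINK_LMAX and the
 * TIPC_DUMP_* flags come from the patch's trace support, the
 * wrapper itself is hypothetical.
 */
static void debug_dump_link(struct tipc_link *l)
{
	char buf[LINK_LMAX];

	tipc_link_dump(l, TIPC_DUMP_ALL, buf);
	pr_info("%s", buf);
}
```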