.. | .. |
39 | 39 | #include "msg.h" |
40 | 40 | #include "addr.h" |
41 | 41 | #include "name_table.h" |
| 42 | +#include "crypto.h" |
42 | 43 | |
43 | 44 | #define MAX_FORWARD_SIZE 1024 |
| 45 | +#ifdef CONFIG_TIPC_CRYPTO |
| 46 | +#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16) |
| 47 | +#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE) |
| 48 | +#else |
44 | 49 | #define BUF_HEADROOM (LL_MAX_HEADER + 48) |
45 | | -#define BUF_TAILROOM 16 |
| 50 | +#define BUF_OVERHEAD BUF_HEADROOM |
| 51 | +#endif |
| 52 | + |
| 53 | +const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) - |
| 54 | + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
46 | 55 | |
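The new `one_page_mtu` constant is sized so that a message buffer, its headroom/overhead, and the `skb_shared_info` trailer together still fit in one memory page. A minimal userspace sketch of the arithmetic, using assumed stand-in values for the kernel constants (`LL_MAX_HEADER`, `EHDR_MAX_SIZE`, `PAGE_SIZE`, and the shared-info size all vary by configuration and architecture):

```c
/* Illustrative sketch of the one_page_mtu arithmetic above. The values
 * below are plausible stand-ins, not the real kernel definitions. */
#include <stdio.h>

#define ALIGN_UP(x, a)        (((x) + (a) - 1) & ~((a) - 1))
#define SMP_CACHE_BYTES       64    /* assumed cache line size */
#define SKB_DATA_ALIGN(x)     ALIGN_UP(x, SMP_CACHE_BYTES)
#define PAGE_SIZE             4096
#define LL_MAX_HEADER         128   /* assumed */
#define EHDR_MAX_SIZE         24    /* assumed */
#define TIPC_AES_GCM_TAG_SIZE 16    /* AES-GCM tags are 16 bytes */
#define SKB_SHINFO_SIZE       320   /* assumed sizeof(struct skb_shared_info) */

int main(void)
{
	/* Crypto case: headroom is padded for the encryption header,
	 * and the authentication tag is charged as extra overhead. */
	int headroom = ALIGN_UP(LL_MAX_HEADER + 48 + EHDR_MAX_SIZE, 16);
	int overhead = headroom + TIPC_AES_GCM_TAG_SIZE;
	int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(overhead)
			   - SKB_DATA_ALIGN(SKB_SHINFO_SIZE);

	printf("headroom=%d overhead=%d one_page_mtu=%d\n",
	       headroom, overhead, one_page_mtu);
	return 0;
}
```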
47 | 56 | static unsigned int align(unsigned int i) |
48 | 57 | { |
.. | .. |
61 | 70 | struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp) |
62 | 71 | { |
63 | 72 | struct sk_buff *skb; |
64 | | - unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; |
65 | 73 | |
66 | | - skb = alloc_skb_fclone(buf_size, gfp); |
| 74 | + skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp); |
67 | 75 | if (skb) { |
68 | 76 | skb_reserve(skb, BUF_HEADROOM); |
69 | 77 | skb_put(skb, size); |
.. | .. |
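With this change `tipc_buf_acquire()` no longer rounds the allocation itself: it allocates `BUF_OVERHEAD + size`, reserves the headroom, and in the crypto case the difference between overhead and headroom leaves tag room at the tail. A rough userspace model of that layout, with assumed sizes (a real sk_buff is of course managed by the kernel allocator):

```c
/* Sketch of the headroom/tailroom layout tipc_buf_acquire() sets up,
 * modelled with a plain byte buffer instead of a real sk_buff. */
#include <assert.h>
#include <stdlib.h>

#define BUF_HEADROOM 208   /* assumed, from the crypto case above */
#define TAG_SIZE      16   /* assumed TIPC_AES_GCM_TAG_SIZE */
#define BUF_OVERHEAD (BUF_HEADROOM + TAG_SIZE)

struct fake_skb {
	unsigned char *head;   /* start of allocation */
	unsigned char *data;   /* start of message data */
	unsigned int   len;    /* bytes of message data */
	unsigned int   size;   /* total allocation */
};

static int fake_buf_acquire(struct fake_skb *skb, unsigned int size)
{
	skb->size = BUF_OVERHEAD + size;
	skb->head = malloc(skb->size);
	if (!skb->head)
		return -1;
	skb->data = skb->head + BUF_HEADROOM;  /* skb_reserve() */
	skb->len  = size;                      /* skb_put() */
	/* TAG_SIZE bytes remain after the data for the AES-GCM tag */
	assert(skb->data + skb->len + TAG_SIZE == skb->head + skb->size);
	return 0;
}

int main(void)
{
	struct fake_skb skb;

	if (fake_buf_acquire(&skb, 1024))
		return 1;
	free(skb.head);
	return 0;
}
```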
169 | 177 | } |
170 | 178 | |
171 | 179 | if (fragid == LAST_FRAGMENT) { |
172 | | - TIPC_SKB_CB(head)->validated = false; |
| 180 | + TIPC_SKB_CB(head)->validated = 0; |
173 | 181 | if (unlikely(!tipc_msg_validate(&head))) |
174 | 182 | goto err; |
175 | 183 | *buf = head; |
.. | .. |
184 | 192 | kfree_skb(*headbuf); |
185 | 193 | *buf = *headbuf = NULL; |
186 | 194 | return 0; |
| 195 | +} |
| 196 | + |
| 197 | +/** |
| 198 | + * tipc_msg_append(): Append data to tail of an existing buffer queue |
| 199 | + * @_hdr: header to be used |
| 200 | + * @m: the data to be appended |
| 201 | + * @dlen: size of data to be appended |
| 202 | + * @mss: max allowable size of buffer |
| 203 | + * @txq: queue to append to |
| 204 | + * Returns the number of 1k blocks appended or errno value |
| 205 | + */ |
| 206 | +int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen, |
| 207 | + int mss, struct sk_buff_head *txq) |
| 208 | +{ |
| 209 | + struct sk_buff *skb; |
| 210 | + int accounted, total, curr; |
| 211 | + int mlen, cpy, rem = dlen; |
| 212 | + struct tipc_msg *hdr; |
| 213 | + |
| 214 | + skb = skb_peek_tail(txq); |
| 215 | + accounted = skb ? msg_blocks(buf_msg(skb)) : 0; |
| 216 | + total = accounted; |
| 217 | + |
| 218 | + do { |
| 219 | + if (!skb || skb->len >= mss) { |
| 220 | + skb = tipc_buf_acquire(mss, GFP_KERNEL); |
| 221 | + if (unlikely(!skb)) |
| 222 | + return -ENOMEM; |
| 223 | + skb_orphan(skb); |
| 224 | + skb_trim(skb, MIN_H_SIZE); |
| 225 | + hdr = buf_msg(skb); |
| 226 | + skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE); |
| 227 | + msg_set_hdr_sz(hdr, MIN_H_SIZE); |
| 228 | + msg_set_size(hdr, MIN_H_SIZE); |
| 229 | + __skb_queue_tail(txq, skb); |
| 230 | + total += 1; |
| 231 | + } |
| 232 | + hdr = buf_msg(skb); |
| 233 | + curr = msg_blocks(hdr); |
| 234 | + mlen = msg_size(hdr); |
| 235 | + cpy = min_t(size_t, rem, mss - mlen); |
| 236 | + if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter)) |
| 237 | + return -EFAULT; |
| 238 | + msg_set_size(hdr, mlen + cpy); |
| 239 | + skb_put(skb, cpy); |
| 240 | + rem -= cpy; |
| 241 | + total += msg_blocks(hdr) - curr; |
| 242 | + } while (rem > 0); |
| 243 | + return total - accounted; |
187 | 244 | } |
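`tipc_msg_append()` charges the socket send buffer in 1k blocks and returns only the delta this call added, which is why it snapshots `accounted` before the loop. A userspace sketch of that accounting, with an assumed `msg_blocks()` rounding rule (the real helper lives in msg.h):

```c
/* Sketch of the 1k-block accounting in tipc_msg_append(). */
#include <stdio.h>

#define MIN_H_SIZE 24        /* minimal TIPC header, bytes */

static int msg_blocks(int msg_size)
{
	return (msg_size + 1023) / 1024;   /* assumed rounding rule */
}

int main(void)
{
	int mss = 1460, dlen = 4000;       /* example stream write */
	int size = 0, total = 0, accounted = 0, rem = dlen;

	/* accounted = blocks already on the queue (empty here) */
	while (rem > 0) {
		if (size == 0 || size >= mss) {    /* need a new skb */
			size = MIN_H_SIZE;
			total += 1;                /* empty msg = 1 block */
		}
		int curr = msg_blocks(size);
		int cpy = rem < mss - size ? rem : mss - size;
		size += cpy;
		rem -= cpy;
		total += msg_blocks(size) - curr;  /* growth in blocks */
	}
	printf("blocks charged: %d\n", total - accounted);
	return 0;
}
```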
188 | 245 | |
189 | 246 | /* tipc_msg_validate - validate basic format of received message |
.. | .. |
214 | 271 | |
215 | 272 | if (unlikely(TIPC_SKB_CB(skb)->validated)) |
216 | 273 | return true; |
| 274 | + |
217 | 275 | if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE))) |
218 | 276 | return false; |
219 | 277 | |
.. | .. |
235 | 293 | if (unlikely(skb->len < msz)) |
236 | 294 | return false; |
237 | 295 | |
238 | | - TIPC_SKB_CB(skb)->validated = true; |
| 296 | + TIPC_SKB_CB(skb)->validated = 1; |
239 | 297 | return true; |
| 298 | +} |
| 299 | + |
| 300 | +/** |
| 301 | + * tipc_msg_fragment - build a fragment skb list for TIPC message |
| 302 | + * |
| 303 | + * @skb: TIPC message skb |
| 304 | + * @hdr: internal msg header to be put on the top of the fragments |
| 305 | + * @pktmax: max size of a fragment incl. the header |
| 306 | + * @frags: returned fragment skb list |
| 307 | + * |
| 308 | + * Returns 0 if the fragmentation is successful, otherwise: -EINVAL |
| 309 | + * or -ENOMEM |
| 310 | + */ |
| 311 | +int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr, |
| 312 | + int pktmax, struct sk_buff_head *frags) |
| 313 | +{ |
| 314 | + int pktno, nof_fragms, dsz, dmax, eat; |
| 315 | + struct tipc_msg *_hdr; |
| 316 | + struct sk_buff *_skb; |
| 317 | + u8 *data; |
| 318 | + |
| 319 | + /* Non-linear buffer? */ |
| 320 | + if (skb_linearize(skb)) |
| 321 | + return -ENOMEM; |
| 322 | + |
| 323 | + data = (u8 *)skb->data; |
| 324 | + dsz = msg_size(buf_msg(skb)); |
| 325 | + dmax = pktmax - INT_H_SIZE; |
| 326 | + if (dsz <= dmax || !dmax) |
| 327 | + return -EINVAL; |
| 328 | + |
| 329 | + nof_fragms = dsz / dmax + 1; |
| 330 | + for (pktno = 1; pktno <= nof_fragms; pktno++) { |
| 331 | + if (pktno < nof_fragms) |
| 332 | + eat = dmax; |
| 333 | + else |
| 334 | + eat = dsz % dmax; |
| 335 | + /* Allocate a new fragment */ |
| 336 | + _skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC); |
| 337 | + if (!_skb) |
| 338 | + goto error; |
| 339 | + skb_orphan(_skb); |
| 340 | + __skb_queue_tail(frags, _skb); |
| 341 | + /* Copy header & data to the fragment */ |
| 342 | + skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE); |
| 343 | + skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat); |
| 344 | + data += eat; |
| 345 | + /* Update the fragment's header */ |
| 346 | + _hdr = buf_msg(_skb); |
| 347 | + msg_set_fragm_no(_hdr, pktno); |
| 348 | + msg_set_nof_fragms(_hdr, nof_fragms); |
| 349 | + msg_set_size(_hdr, INT_H_SIZE + eat); |
| 350 | + } |
| 351 | + return 0; |
| 352 | + |
| 353 | +error: |
| 354 | + __skb_queue_purge(frags); |
| 355 | + __skb_queue_head_init(frags); |
| 356 | + return -ENOMEM; |
240 | 357 | } |
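The fragment count in `tipc_msg_fragment()` comes straight from integer division: `dsz / dmax + 1` fragments, where every fragment but the last carries `dmax` payload bytes and the last carries the remainder. A small standalone sketch of that arithmetic (INT_H_SIZE is 40 bytes in TIPC; the MTU and message size here are example values):

```c
/* Sketch of the fragment-count arithmetic in tipc_msg_fragment():
 * dsz bytes of payload split into fragments of at most dmax bytes,
 * each re-wrapped with an INT_H_SIZE internal header. */
#include <stdio.h>

#define INT_H_SIZE 40   /* TIPC internal header size, bytes */

int main(void)
{
	int pktmax = 1500;               /* example bearer MTU */
	int dmax = pktmax - INT_H_SIZE;  /* payload room per fragment */
	int dsz = 5000;                  /* example oversized message */

	if (dsz <= dmax || !dmax)
		return 1;                /* nothing to fragment */

	int nof_fragms = dsz / dmax + 1;
	for (int pktno = 1; pktno <= nof_fragms; pktno++) {
		int eat = (pktno < nof_fragms) ? dmax : dsz % dmax;
		printf("fragment %d/%d: %d header + %d payload bytes\n",
		       pktno, nof_fragms, INT_H_SIZE, eat);
	}
	return 0;
}
```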
241 | 358 | |
242 | 359 | /** |
.. | .. |
276 | 393 | if (unlikely(!skb)) { |
277 | 394 | if (pktmax != MAX_MSG_SIZE) |
278 | 395 | return -ENOMEM; |
279 | | - rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list); |
| 396 | + rc = tipc_msg_build(mhdr, m, offset, dsz, |
| 397 | + one_page_mtu, list); |
280 | 398 | if (rc != dsz) |
281 | 399 | return rc; |
282 | 400 | if (tipc_msg_assemble(list)) |
.. | .. |
356 | 474 | } |
357 | 475 | |
358 | 476 | /** |
359 | | - * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one |
360 | | - * @skb: the buffer to append to ("bundle") |
361 | | - * @msg: message to be appended |
362 | | - * @mtu: max allowable size for the bundle buffer |
363 | | - * Consumes buffer if successful |
364 | | - * Returns true if bundling could be performed, otherwise false |
| 477 | + * tipc_msg_bundle - Append contents of a buffer to tail of an existing one |
| 478 | + * @bskb: the bundle buffer to append to |
| 479 | + * @msg: message to be appended |
| 480 | + * @max: max allowable size for the bundle buffer |
| 481 | + * |
| 482 | + * Returns "true" if bundling has been performed, otherwise "false" |
365 | 483 | */ |
366 | | -bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu) |
| 484 | +static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg, |
| 485 | + u32 max) |
367 | 486 | { |
368 | | - struct tipc_msg *bmsg; |
369 | | - unsigned int bsz; |
370 | | - unsigned int msz = msg_size(msg); |
371 | | - u32 start, pad; |
372 | | - u32 max = mtu - INT_H_SIZE; |
| 487 | + struct tipc_msg *bmsg = buf_msg(bskb); |
| 488 | + u32 msz, bsz, offset, pad; |
373 | 489 | |
374 | | - if (likely(msg_user(msg) == MSG_FRAGMENTER)) |
375 | | - return false; |
376 | | - if (!skb) |
377 | | - return false; |
378 | | - bmsg = buf_msg(skb); |
| 490 | + msz = msg_size(msg); |
379 | 491 | bsz = msg_size(bmsg); |
380 | | - start = align(bsz); |
381 | | - pad = start - bsz; |
| 492 | + offset = align(bsz); |
| 493 | + pad = offset - bsz; |
382 | 494 | |
383 | | - if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL)) |
| 495 | + if (unlikely(skb_tailroom(bskb) < (pad + msz))) |
384 | 496 | return false; |
385 | | - if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) |
386 | | - return false; |
387 | | - if (unlikely(msg_user(bmsg) != MSG_BUNDLER)) |
388 | | - return false; |
389 | | - if (unlikely(skb_tailroom(skb) < (pad + msz))) |
390 | | - return false; |
391 | | - if (unlikely(max < (start + msz))) |
392 | | - return false; |
393 | | - if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) && |
394 | | - (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE)) |
| 497 | + if (unlikely(max < (offset + msz))) |
395 | 498 | return false; |
396 | 499 | |
397 | | - skb_put(skb, pad + msz); |
398 | | - skb_copy_to_linear_data_offset(skb, start, msg, msz); |
399 | | - msg_set_size(bmsg, start + msz); |
| 500 | + skb_put(bskb, pad + msz); |
| 501 | + skb_copy_to_linear_data_offset(bskb, offset, msg, msz); |
| 502 | + msg_set_size(bmsg, offset + msz); |
400 | 503 | msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1); |
| 504 | + return true; |
| 505 | +} |
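The slimmed-down `tipc_msg_bundle()` places each appended message at the next 4-byte boundary inside the bundle, so the only geometry it checks is the tailroom and the `max` capacity against `align(bsz) + msz`. A sketch of that offset/padding computation with example numbers:

```c
/* Sketch of the offset/padding math in tipc_msg_bundle(): each bundled
 * message starts at the next 4-byte boundary inside the bundle buffer. */
#include <stdio.h>

static unsigned int align4(unsigned int i)
{
	return (i + 3) & ~3u;   /* same round-up as msg.c's align() */
}

int main(void)
{
	unsigned int max = 1460;        /* example bundle capacity */
	unsigned int bsz = 358;         /* current bundle size */
	unsigned int msz = 100;         /* size of message to append */

	unsigned int offset = align4(bsz);   /* where new msg starts */
	unsigned int pad = offset - bsz;     /* 0..3 filler bytes */

	if (max < offset + msz) {
		puts("no room: caller keeps the message for later");
		return 0;
	}
	printf("append at %u (pad %u), bundle grows to %u bytes\n",
	       offset, pad, offset + msz);
	return 0;
}
```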
| 506 | + |
| 507 | +/** |
| 508 | + * tipc_msg_try_bundle - Try to bundle a new message to the last one |
| 509 | + * @tskb: the last/target message to which the new one will be appended |
| 510 | + * @skb: the new message skb pointer |
| 511 | + * @mss: max message size (header inclusive) |
| 512 | + * @dnode: destination node for the message |
| 513 | + * @new_bundle: set "true" if this call created a new bundle, else "false" |
| 514 | + * |
| 515 | + * Return: "true" if the new message skb is suitable for bundling now or |
| 516 | + * later; if bundling was performed this time, the skb is consumed and |
| 517 | + * the skb pointer set to NULL. |
| 518 | + * Otherwise "false": the skb cannot be bundled at all. |
| 519 | + */ |
| 520 | +bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss, |
| 521 | + u32 dnode, bool *new_bundle) |
| 522 | +{ |
| 523 | + struct tipc_msg *msg, *inner, *outer; |
| 524 | + u32 tsz; |
| 525 | + |
| 526 | + /* First, check if the new buffer is suitable for bundling */ |
| 527 | + msg = buf_msg(*skb); |
| 528 | + if (msg_user(msg) == MSG_FRAGMENTER) |
| 529 | + return false; |
| 530 | + if (msg_user(msg) == TUNNEL_PROTOCOL) |
| 531 | + return false; |
| 532 | + if (msg_user(msg) == BCAST_PROTOCOL) |
| 533 | + return false; |
| 534 | + if (mss <= INT_H_SIZE + msg_size(msg)) |
| 535 | + return false; |
| 536 | + |
| 537 | + /* OK so far, but is there a last/target buffer to bundle into? */ |
| 538 | + if (unlikely(!tskb)) |
| 539 | + return true; |
| 540 | + |
| 541 | + /* Is it a bundle already? Try to bundle the new message into it */ |
| 542 | + if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) { |
| 543 | + *new_bundle = false; |
| 544 | + goto bundle; |
| 545 | + } |
| 546 | + |
| 547 | + /* Make a new bundle of the two messages if possible */ |
| 548 | + tsz = msg_size(buf_msg(tskb)); |
| 549 | + if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg))) |
| 550 | + return true; |
| 551 | + if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE, |
| 552 | + GFP_ATOMIC))) |
| 553 | + return true; |
| 554 | + inner = buf_msg(tskb); |
| 555 | + skb_push(tskb, INT_H_SIZE); |
| 556 | + outer = buf_msg(tskb); |
| 557 | + tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE, |
| 558 | + dnode); |
| 559 | + msg_set_importance(outer, msg_importance(inner)); |
| 560 | + msg_set_size(outer, INT_H_SIZE + tsz); |
| 561 | + msg_set_msgcnt(outer, 1); |
| 562 | + *new_bundle = true; |
| 563 | + |
| 564 | +bundle: |
| 565 | + if (likely(tipc_msg_bundle(tskb, msg, mss))) { |
| 566 | + consume_skb(*skb); |
| 567 | + *skb = NULL; |
| 568 | + } |
401 | 569 | return true; |
402 | 570 | } |
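When the last buffer is not yet a bundle, `tipc_msg_try_bundle()` converts it in place: it expands the head, pushes an INT_H_SIZE bundle header in front of the existing message, and copies the inner message's importance into the outer header. A toy model of that conversion, using a flat byte array in place of an sk_buff (the zeroed header is a stand-in, not the real TIPC header layout):

```c
/* Sketch of how tipc_msg_try_bundle() turns a plain message into a
 * one-message bundle: an INT_H_SIZE bundle header is prepended, so the
 * original message becomes the bundle's first (already aligned) member. */
#include <stdio.h>
#include <string.h>

#define INT_H_SIZE 40   /* TIPC internal header size, bytes */

struct fake_bundle {
	unsigned char buf[1500];
	unsigned int  size;      /* total bytes used, header included */
	unsigned int  msgcnt;    /* messages bundled so far */
};

/* Wrap an existing message (msg, tsz bytes) into a new bundle. */
static void make_bundle(struct fake_bundle *b,
			const unsigned char *msg, unsigned int tsz)
{
	memset(b->buf, 0, INT_H_SIZE);          /* stand-in bundle header */
	memcpy(b->buf + INT_H_SIZE, msg, tsz);  /* the skb_push() effect */
	b->size = INT_H_SIZE + tsz;
	b->msgcnt = 1;
}

int main(void)
{
	unsigned char msg[100] = { 0 };
	struct fake_bundle b;

	make_bundle(&b, msg, sizeof(msg));
	printf("bundle: %u bytes, %u message(s)\n", b.size, b.msgcnt);
	return 0;
}
```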
403 | 571 | |
.. | .. |
408 | 576 | * @pos: position in outer message of msg to be extracted. |
409 | 577 | * Returns position of next msg |
410 | 578 | * Consumes outer buffer when last packet extracted |
411 | | - * Returns true when when there is an extracted buffer, otherwise false |
| 579 | + * Returns true when there is an extracted buffer, otherwise false |
412 | 580 | */ |
413 | 581 | bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos) |
414 | 582 | { |
.. | .. |
447 | 615 | } |
448 | 616 | |
449 | 617 | /** |
450 | | - * tipc_msg_make_bundle(): Create bundle buf and append message to its tail |
451 | | - * @list: the buffer chain, where head is the buffer to replace/append |
452 | | - * @skb: buffer to be created, appended to and returned in case of success |
453 | | - * @msg: message to be appended |
454 | | - * @mtu: max allowable size for the bundle buffer, inclusive header |
455 | | - * @dnode: destination node for message. (Not always present in header) |
456 | | - * Returns true if success, otherwise false |
457 | | - */ |
458 | | -bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg, |
459 | | - u32 mtu, u32 dnode) |
460 | | -{ |
461 | | - struct sk_buff *_skb; |
462 | | - struct tipc_msg *bmsg; |
463 | | - u32 msz = msg_size(msg); |
464 | | - u32 max = mtu - INT_H_SIZE; |
465 | | - |
466 | | - if (msg_user(msg) == MSG_FRAGMENTER) |
467 | | - return false; |
468 | | - if (msg_user(msg) == TUNNEL_PROTOCOL) |
469 | | - return false; |
470 | | - if (msg_user(msg) == BCAST_PROTOCOL) |
471 | | - return false; |
472 | | - if (msz > (max / 2)) |
473 | | - return false; |
474 | | - |
475 | | - _skb = tipc_buf_acquire(max, GFP_ATOMIC); |
476 | | - if (!_skb) |
477 | | - return false; |
478 | | - |
479 | | - skb_trim(_skb, INT_H_SIZE); |
480 | | - bmsg = buf_msg(_skb); |
481 | | - tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0, |
482 | | - INT_H_SIZE, dnode); |
483 | | - msg_set_importance(bmsg, msg_importance(msg)); |
484 | | - msg_set_seqno(bmsg, msg_seqno(msg)); |
485 | | - msg_set_ack(bmsg, msg_ack(msg)); |
486 | | - msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); |
487 | | - tipc_msg_bundle(_skb, msg, mtu); |
488 | | - *skb = _skb; |
489 | | - return true; |
490 | | -} |
491 | | - |
492 | | -/** |
493 | 618 | * tipc_msg_reverse(): swap source and destination addresses and add error code |
494 | 619 | * @own_node: originating node id for reversed message |
495 | | - * @skb: buffer containing message to be reversed; may be replaced. |
| 620 | + * @skb: buffer containing message to be reversed; will be consumed |
496 | 621 | * @err: error code to be set in message, if any |
497 | | - * Consumes buffer at failure |
| 622 | + * Replaces consumed buffer with new one when successful |
498 | 623 | * Returns true if success, otherwise false |
499 | 624 | */ |
500 | 625 | bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) |
501 | 626 | { |
502 | 627 | struct sk_buff *_skb = *skb; |
503 | | - struct tipc_msg *hdr; |
504 | | - struct tipc_msg ohdr; |
505 | | - int dlen; |
| 628 | + struct tipc_msg *_hdr, *hdr; |
| 629 | + int hlen, dlen; |
506 | 630 | |
507 | 631 | if (skb_linearize(_skb)) |
508 | 632 | goto exit; |
509 | | - hdr = buf_msg(_skb); |
510 | | - dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE); |
511 | | - if (msg_dest_droppable(hdr)) |
| 633 | + _hdr = buf_msg(_skb); |
| 634 | + dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE); |
| 635 | + hlen = msg_hdr_sz(_hdr); |
| 636 | + |
| 637 | + if (msg_dest_droppable(_hdr)) |
512 | 638 | goto exit; |
513 | | - if (msg_errcode(hdr)) |
| 639 | + if (msg_errcode(_hdr)) |
514 | 640 | goto exit; |
515 | 641 | |
516 | | - /* Take a copy of original header before altering message */ |
517 | | - memcpy(&ohdr, hdr, msg_hdr_sz(hdr)); |
| 642 | + /* Never return SHORT header */ |
| 643 | + if (hlen == SHORT_H_SIZE) |
| 644 | + hlen = BASIC_H_SIZE; |
518 | 645 | |
519 | | - /* Never return SHORT header; expand by replacing buffer if necessary */ |
520 | | - if (msg_short(hdr)) { |
521 | | - *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC); |
522 | | - if (!*skb) |
523 | | - goto exit; |
524 | | - memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen); |
525 | | - kfree_skb(_skb); |
526 | | - _skb = *skb; |
527 | | - hdr = buf_msg(_skb); |
528 | | - memcpy(hdr, &ohdr, BASIC_H_SIZE); |
529 | | - msg_set_hdr_sz(hdr, BASIC_H_SIZE); |
530 | | - } |
| 646 | + /* Don't return data along with SYN+; the sender has a clone */ |
| 647 | + if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD) |
| 648 | + dlen = 0; |
531 | 649 | |
532 | | - /* Now reverse the concerned fields */ |
| 650 | + /* Allocate new buffer to return */ |
| 651 | + *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC); |
| 652 | + if (!*skb) |
| 653 | + goto exit; |
| 654 | + memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr)); |
| 655 | + memcpy((*skb)->data + hlen, msg_data(_hdr), dlen); |
| 656 | + |
| 657 | + /* Build reverse header in new buffer */ |
| 658 | + hdr = buf_msg(*skb); |
| 659 | + msg_set_hdr_sz(hdr, hlen); |
533 | 660 | msg_set_errcode(hdr, err); |
534 | 661 | msg_set_non_seq(hdr, 0); |
535 | | - msg_set_origport(hdr, msg_destport(&ohdr)); |
536 | | - msg_set_destport(hdr, msg_origport(&ohdr)); |
537 | | - msg_set_destnode(hdr, msg_prevnode(&ohdr)); |
| 662 | + msg_set_origport(hdr, msg_destport(_hdr)); |
| 663 | + msg_set_destport(hdr, msg_origport(_hdr)); |
| 664 | + msg_set_destnode(hdr, msg_prevnode(_hdr)); |
538 | 665 | msg_set_prevnode(hdr, own_node); |
539 | 666 | msg_set_orignode(hdr, own_node); |
540 | | - msg_set_size(hdr, msg_hdr_sz(hdr) + dlen); |
541 | | - skb_trim(_skb, msg_size(hdr)); |
| 667 | + msg_set_size(hdr, hlen + dlen); |
542 | 668 | skb_orphan(_skb); |
| 669 | + kfree_skb(_skb); |
543 | 670 | return true; |
544 | 671 | exit: |
545 | 672 | kfree_skb(_skb); |
546 | 673 | *skb = NULL; |
547 | 674 | return false; |
| 675 | +} |
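The rewritten `tipc_msg_reverse()` builds the return header in a fresh buffer instead of mutating in place, but the field swap itself is unchanged: destination port/node become origin, the previous hop becomes the destination, and the rejecting node installs itself as origin and previous node. A toy illustration of that swap (the struct is illustrative, not the real TIPC header layout):

```c
/* Sketch of the field swap tipc_msg_reverse() performs, on a toy header. */
#include <stdio.h>

struct toy_hdr {
	unsigned int orignode, origport;
	unsigned int destnode, destport;
	unsigned int prevnode;
	int errcode;
};

static void reverse_hdr(struct toy_hdr *h, unsigned int own_node, int err)
{
	struct toy_hdr in = *h;           /* keep the original fields */

	h->errcode  = err;
	h->origport = in.destport;        /* destination becomes origin */
	h->destport = in.origport;        /* origin becomes destination */
	h->destnode = in.prevnode;        /* send back to previous hop */
	h->orignode = own_node;           /* we are now the originator */
	h->prevnode = own_node;
}

int main(void)
{
	struct toy_hdr h = { .orignode = 1, .origport = 100,
			     .destnode = 3, .destport = 200, .prevnode = 2 };

	reverse_hdr(&h, 3, -1 /* example error code */);
	printf("now %u:%u -> %u:%u via %u, err %d\n",
	       h.orignode, h.origport, h.destnode, h.destport,
	       h.prevnode, h.errcode);
	return 0;
}
```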
| 676 | + |
| 677 | +bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy) |
| 678 | +{ |
| 679 | + struct sk_buff *skb, *_skb; |
| 680 | + |
| 681 | + skb_queue_walk(msg, skb) { |
| 682 | + _skb = skb_clone(skb, GFP_ATOMIC); |
| 683 | + if (!_skb) { |
| 684 | + __skb_queue_purge(cpy); |
| 685 | + pr_err_ratelimited("Failed to clone buffer chain\n"); |
| 686 | + return false; |
| 687 | + } |
| 688 | + __skb_queue_tail(cpy, _skb); |
| 689 | + } |
| 690 | + return true; |
548 | 691 | } |
549 | 692 | |
550 | 693 | /** |
.. | .. |
583 | 726 | msg_set_destnode(msg, dnode); |
584 | 727 | msg_set_destport(msg, dport); |
585 | 728 | *err = TIPC_OK; |
586 | | - |
587 | | - if (!skb_cloned(skb)) |
588 | | - return true; |
589 | 729 | |
590 | 730 | return true; |
591 | 731 | } |
.. | .. |
676 | 816 | * @seqno: sequence number of buffer to add |
677 | 817 | * @skb: buffer to add |
678 | 818 | */ |
679 | | -void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno, |
| 819 | +bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno, |
680 | 820 | struct sk_buff *skb) |
681 | 821 | { |
682 | 822 | struct sk_buff *_skb, *tmp; |
683 | 823 | |
684 | 824 | if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) { |
685 | 825 | __skb_queue_head(list, skb); |
686 | | - return; |
| 826 | + return true; |
687 | 827 | } |
688 | 828 | |
689 | 829 | if (more(seqno, buf_seqno(skb_peek_tail(list)))) { |
690 | 830 | __skb_queue_tail(list, skb); |
691 | | - return; |
| 831 | + return true; |
692 | 832 | } |
693 | 833 | |
694 | 834 | skb_queue_walk_safe(list, _skb, tmp) { |
.. | .. |
697 | 837 | if (seqno == buf_seqno(_skb)) |
698 | 838 | break; |
699 | 839 | __skb_queue_before(list, _skb, skb); |
700 | | - return; |
| 840 | + return true; |
701 | 841 | } |
702 | 842 | kfree_skb(skb); |
| 843 | + return false; |
703 | 844 | } |
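`__tipc_skb_queue_sorted()` now reports whether the buffer was actually inserted; duplicates are freed and yield false. The ordering relies on TIPC's `less()`/`more()` helpers, which compare 16-bit sequence numbers modulo 2^16 so that ordering survives wraparound. A sketch of one standard way to express that comparison (TIPC's actual helpers live in msg.h and are equivalent in effect, an assumption worth checking against the source):

```c
/* Sketch of wraparound-safe u16 sequence comparison, as used to keep
 * the deferred-delivery queue sorted in __tipc_skb_queue_sorted(). */
#include <stdint.h>
#include <stdio.h>

static int less(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;    /* signed modulo-2^16 distance */
}

int main(void)
{
	/* 65535 precedes 2 across the wrap; 2 precedes 300 normally */
	printf("%d %d %d\n",
	       less(65535, 2),    /* 1: wrapped, still "earlier" */
	       less(2, 300),      /* 1 */
	       less(300, 2));     /* 0 */
	return 0;
}
```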
704 | 845 | |
705 | 846 | void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb, |
---|