Diff against net/sched/cls_u32.c; each `@@ -OLD +NEW @@` header gives the first line of the hunk in the old and new version of the file.

```diff
@@ -1 +1 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * net/sched/cls_u32.c Ugly (or Universal) 32bit key Packet Classifier.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  *
  * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  *
@@ -23 +19 @@
  * It is especially useful for link sharing combined with QoS;
  * pure RSVP doesn't need such a general approach and can use
  * much simpler (and faster) schemes, sort of cls_rsvp.c.
- *
- * JHS: We should remove the CONFIG_NET_CLS_IND from here
- * eventually when the meta match extension is made available
  *
  * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
  */
@@ -52 +45 @@
         u32 handle;
         struct tc_u_hnode __rcu *ht_up;
         struct tcf_exts exts;
-#ifdef CONFIG_NET_CLS_IND
         int ifindex;
-#endif
         u8 fshift;
         struct tcf_result res;
         struct tc_u_hnode __rcu *ht_down;
@@ -68 +59 @@
         u32 mask;
         u32 __percpu *pcpu_success;
 #endif
-        struct tcf_proto *tp;
         struct rcu_work rwork;
         /* The 'sel' field MUST be the last field in structure to allow for
          * tc_u32_keys allocated at end of structure.
@@ -80 +70 @@
         struct tc_u_hnode __rcu *next;
         u32 handle;
         u32 prio;
-        struct tc_u_common *tp_c;
         int refcnt;
         unsigned int divisor;
         struct idr handle_idr;
+        bool is_root;
         struct rcu_head rcu;
         u32 flags;
         /* The 'ht' field MUST be the last field in structure to allow for
          * more entries allocated at end of structure.
          */
-        struct tc_u_knode __rcu *ht[1];
+        struct tc_u_knode __rcu *ht[];
 };
 
 struct tc_u_common {
```
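The `ht[1]` → `ht[]` change above is the usual conversion from a one-element array to a C99 flexible array member, with allocations sized via `struct_size()` (a helper from the kernel's `<linux/overflow.h>`) rather than open-coded arithmetic. A minimal userspace sketch of the same layout and sizing — the struct, macro and values below are illustrative, not taken from the kernel:

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a bucket table ending in a flexible array member,
 * mirroring "struct tc_u_knode __rcu *ht[];" at the end of tc_u_hnode. */
struct bucket_table {
        unsigned int divisor;
        void *slots[];          /* must be the last member */
};

/* Userspace approximation of the kernel's struct_size(p, member, n):
 * size of the struct header plus n trailing elements (no overflow checks). */
#define STRUCT_SIZE(p, member, n) \
        (sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
        unsigned int divisor = 8;
        struct bucket_table *tbl;

        /* header plus (divisor + 1) trailing slots, as the cls_u32 hunks below do */
        tbl = calloc(1, STRUCT_SIZE(tbl, slots, divisor + 1));
        if (!tbl)
                return 1;
        tbl->divisor = divisor;

        printf("header %zu bytes, total %zu bytes for %u+1 slots\n",
               sizeof(*tbl), STRUCT_SIZE(tbl, slots, divisor + 1), divisor);
        free(tbl);
        return 0;
}
```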
```diff
@@ -98 +88 @@
         int refcnt;
         struct idr handle_idr;
         struct hlist_node hnode;
-        struct rcu_head rcu;
+        long knodes;
 };
 
 static inline unsigned int u32_hash_fold(__be32 key,
@@ -181 +171 @@
                 if (n->sel.flags & TC_U32_TERMINAL) {
 
                         *res = n->res;
-#ifdef CONFIG_NET_CLS_IND
                         if (!tcf_match_indev(skb, n->ifindex)) {
                                 n = rcu_dereference_bh(n->next);
                                 goto next_knode;
                         }
-#endif
 #ifdef CONFIG_CLS_U32_PERF
                         __this_cpu_inc(n->pf->rhit);
 #endif
@@ -344 +332 @@
                 return block->q;
 }
 
-static unsigned int tc_u_hash(const struct tcf_proto *tp)
+static struct hlist_head *tc_u_hash(void *key)
 {
-        return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT);
+        return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
 }
 
-static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
+static struct tc_u_common *tc_u_common_find(void *key)
 {
         struct tc_u_common *tc;
-        unsigned int h;
-
-        h = tc_u_hash(tp);
-        hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
-                if (tc->ptr == tc_u_common_ptr(tp))
+        hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
+                if (tc->ptr == key)
                         return tc;
         }
         return NULL;
```
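With the change above, `tc_u_hash()` returns the bucket head for a bare `void *` key and `tc_u_common_find()` compares `tc->ptr` against that same key, so callers hash exactly the pointer they store. A rough, self-contained sketch of this pointer-keyed lookup pattern — the multiplicative hash and all names below are illustrative stand-ins, not the kernel's `hash_ptr()`/hlist API:

```c
#include <stdint.h>
#include <stdio.h>

#define HASH_BITS 4
#define HASH_SIZE (1u << HASH_BITS)

struct common {
        void *ptr;              /* identity key, like tc_u_common::ptr */
        struct common *next;
};

static struct common *hash_table[HASH_SIZE];

/* Toy stand-in for hash_ptr(): mix the pointer bits down to HASH_BITS. */
static struct common **bucket_for(void *key)
{
        uintptr_t v = (uintptr_t)key;

        v ^= v >> 16;
        v *= 0x9e3779b1u;       /* illustrative multiplicative mix */
        return &hash_table[(v >> (32 - HASH_BITS)) & (HASH_SIZE - 1)];
}

static struct common *common_find(void *key)
{
        struct common *c;

        for (c = *bucket_for(key); c; c = c->next)
                if (c->ptr == key)      /* compare the key itself, as tc_u_common_find() does */
                        return c;
        return NULL;
}

int main(void)
{
        static struct common a;
        struct common **head = bucket_for(&a);

        a.ptr = &a;
        a.next = *head;
        *head = &a;             /* rough equivalent of hlist_add_head() */

        printf("found: %d\n", common_find(&a) == &a);
        return 0;
}
```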
```diff
@@ -365 +350 @@
 static int u32_init(struct tcf_proto *tp)
 {
         struct tc_u_hnode *root_ht;
-        struct tc_u_common *tp_c;
-        unsigned int h;
+        void *key = tc_u_common_ptr(tp);
+        struct tc_u_common *tp_c = tc_u_common_find(key);
 
-        tp_c = tc_u_common_find(tp);
-
-        root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
+        root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL);
         if (root_ht == NULL)
                 return -ENOBUFS;
 
         root_ht->refcnt++;
         root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
         root_ht->prio = tp->prio;
+        root_ht->is_root = true;
         idr_init(&root_ht->handle_idr);
 
         if (tp_c == NULL) {
-                tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
+                tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
                 if (tp_c == NULL) {
                         kfree(root_ht);
                         return -ENOBUFS;
                 }
-                tp_c->ptr = tc_u_common_ptr(tp);
+                tp_c->ptr = key;
                 INIT_HLIST_NODE(&tp_c->hnode);
                 idr_init(&tp_c->handle_idr);
 
-                h = tc_u_hash(tp);
-                hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]);
+                hlist_add_head(&tp_c->hnode, tc_u_hash(key));
         }
 
         tp_c->refcnt++;
         RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
         rcu_assign_pointer(tp_c->hlist, root_ht);
-        root_ht->tp_c = tp_c;
 
         root_ht->refcnt++;
         rcu_assign_pointer(tp->root, root_ht);
@@ -404 +386 @@
         return 0;
 }
 
-static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
-                           bool free_pf)
+static void __u32_destroy_key(struct tc_u_knode *n)
 {
         struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
 
         tcf_exts_destroy(&n->exts);
-        tcf_exts_put_net(&n->exts);
         if (ht && --ht->refcnt == 0)
                 kfree(ht);
+        kfree(n);
+}
+
+static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
+{
+        tcf_exts_put_net(&n->exts);
 #ifdef CONFIG_CLS_U32_PERF
         if (free_pf)
                 free_percpu(n->pf);
@@ -421 +407 @@
         if (free_pf)
                 free_percpu(n->pcpu_success);
 #endif
-        kfree(n);
-        return 0;
+        __u32_destroy_key(n);
 }
 
 /* u32_delete_key_rcu should be called when free'ing a copied
```
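Splitting the destructor as above separates the teardown every knode needs (`__u32_destroy_key()`: exts, the ht refcount, the node itself) from the extra work only a fully published key gets (`tcf_exts_put_net()` and, optionally, the per-CPU counter frees); later in this diff the error paths of `u32_change()` switch to the bare `__u32_destroy_key()` for copies that were never published. A tiny illustrative sketch of that split, with hypothetical names rather than kernel API:

```c
#include <stdlib.h>

struct key_copy {
        int *stats;             /* optional per-key statistics */
};

/* Inner destructor: safe even for a half-built copy that was never published. */
static void __key_destroy(struct key_copy *k)
{
        free(k);
}

/* Outer destructor: additionally drops what only fully published keys own. */
static void key_destroy(struct key_copy *k, int free_stats)
{
        if (free_stats)
                free(k->stats);
        __key_destroy(k);
}

int main(void)
{
        struct key_copy *k = calloc(1, sizeof(*k));

        if (!k)
                return 1;
        k->stats = calloc(4, sizeof(*k->stats));
        key_destroy(k, 1);      /* normal teardown */

        k = calloc(1, sizeof(*k));
        if (!k)
                return 1;
        __key_destroy(k);       /* error path: nothing extra to drop */
        return 0;
}
```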
```diff
@@ -439 +424 @@
                                               struct tc_u_knode,
                                               rwork);
         rtnl_lock();
-        u32_destroy_key(key->tp, key, false);
+        u32_destroy_key(key, false);
         rtnl_unlock();
 }
 
@@ -456 +441 @@
                                               struct tc_u_knode,
                                               rwork);
         rtnl_lock();
-        u32_destroy_key(key->tp, key, true);
+        u32_destroy_key(key, true);
         rtnl_unlock();
 }
 
 static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
 {
+        struct tc_u_common *tp_c = tp->data;
         struct tc_u_knode __rcu **kp;
         struct tc_u_knode *pkp;
         struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);
@@ -472 +458 @@
                      kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
                         if (pkp == key) {
                                 RCU_INIT_POINTER(*kp, key->next);
+                                tp_c->knodes--;
 
                                 tcf_unbind_filter(tp, &key->res);
                                 idr_remove(&ht->handle_idr, key->handle);
@@ -497 +484 @@
         cls_u32.hnode.handle = h->handle;
         cls_u32.hnode.prio = h->prio;
 
-        tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
+        tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
 }
 
 static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
@@ -515 +502 @@
         cls_u32.hnode.handle = h->handle;
         cls_u32.hnode.prio = h->prio;
 
-        err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
+        err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
         if (err < 0) {
                 u32_clear_hw_hnode(tp, h, NULL);
                 return err;
@@ -539 +526 @@
         cls_u32.command = TC_CLSU32_DELETE_KNODE;
         cls_u32.knode.handle = n->handle;
 
-        tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
-        tcf_block_offload_dec(block, &n->flags);
+        tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
+                            &n->flags, &n->in_hw_count, true);
 }
 
 static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
@@ -564 +551 @@
         cls_u32.knode.mask = 0;
 #endif
         cls_u32.knode.sel = &n->sel;
+        cls_u32.knode.res = &n->res;
         cls_u32.knode.exts = &n->exts;
         if (n->ht_down)
                 cls_u32.knode.link_handle = ht->handle;
 
-        err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
-        if (err < 0) {
+        err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
+                              &n->flags, &n->in_hw_count, true);
+        if (err) {
                 u32_remove_hw_knode(tp, n, NULL);
                 return err;
-        } else if (err > 0) {
-                n->in_hw_count = err;
-                tcf_block_offload_inc(block, &n->flags);
         }
 
         if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
@@ -586 +572 @@
 static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
                             struct netlink_ext_ack *extack)
 {
+        struct tc_u_common *tp_c = tp->data;
         struct tc_u_knode *n;
         unsigned int h;
 
@@ -593 +580 @@
                 while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
                         RCU_INIT_POINTER(ht->ht[h],
                                          rtnl_dereference(n->next));
+                        tp_c->knodes--;
                         tcf_unbind_filter(tp, &n->res);
                         u32_remove_hw_knode(tp, n, extack);
                         idr_remove(&ht->handle_idr, n->handle);
                         if (tcf_exts_get_net(&n->exts))
                                 tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
                         else
-                                u32_destroy_key(n->tp, n, true);
+                                u32_destroy_key(n, true);
                 }
         }
 }
@@ -632 +620 @@
         return -ENOENT;
 }
 
-static bool ht_empty(struct tc_u_hnode *ht)
-{
-        unsigned int h;
-
-        for (h = 0; h <= ht->divisor; h++)
-                if (rcu_access_pointer(ht->ht[h]))
-                        return false;
-
-        return true;
-}
-
-static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
+static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
+                        struct netlink_ext_ack *extack)
 {
         struct tc_u_common *tp_c = tp->data;
         struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
@@ -677 +655 @@
 }
 
 static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
-                      struct netlink_ext_ack *extack)
+                      bool rtnl_held, struct netlink_ext_ack *extack)
 {
         struct tc_u_hnode *ht = arg;
-        struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
         struct tc_u_common *tp_c = tp->data;
         int ret = 0;
-
-        if (ht == NULL)
-                goto out;
 
         if (TC_U32_KEY(ht->handle)) {
                 u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
@@ -693 +667 @@
                 goto out;
         }
 
-        if (root_ht == ht) {
+        if (ht->is_root) {
                 NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
                 return -EINVAL;
         }
@@ -706 +680 @@
         }
 
 out:
-        *last = true;
-        if (root_ht) {
-                if (root_ht->refcnt > 2) {
-                        *last = false;
-                        goto ret;
-                }
-                if (root_ht->refcnt == 2) {
-                        if (!ht_empty(root_ht)) {
-                                *last = false;
-                                goto ret;
-                        }
-                }
-        }
-
-        if (tp_c->refcnt > 1) {
-                *last = false;
-                goto ret;
-        }
-
-        if (tp_c->refcnt == 1) {
-                struct tc_u_hnode *ht;
-
-                for (ht = rtnl_dereference(tp_c->hlist);
-                     ht;
-                     ht = rtnl_dereference(ht->next))
-                        if (!ht_empty(ht)) {
-                                *last = false;
-                                break;
-                        }
-        }
-
-ret:
+        *last = tp_c->refcnt == 1 && tp_c->knodes == 0;
         return ret;
 }
 
```
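With `tp_c->knodes` maintained on every insert and removal (the `tp_c->knodes--` hunks above and the `tp_c->knodes++` hunk further down), `u32_delete()` no longer needs `ht_empty()` to walk every bucket of every table: the `*last` decision collapses to a refcount-and-counter check. A small sketch of the bookkeeping, with illustrative types in place of the real structures:

```c
#include <stdbool.h>
#include <stdio.h>

struct table {
        long knodes;            /* live keys across all hash tables, like tc_u_common::knodes */
        int refcnt;
};

static void key_insert(struct table *t)
{
        t->knodes++;            /* u32_change(): tp_c->knodes++ after linking the node */
}

static void key_remove(struct table *t)
{
        t->knodes--;            /* u32_delete_key()/u32_clear_hnode(): tp_c->knodes-- */
}

/* The new "*last" computation: only the caller's reference left and no keys. */
static bool chain_is_empty(const struct table *t)
{
        return t->refcnt == 1 && t->knodes == 0;
}

int main(void)
{
        struct table t = { .knodes = 0, .refcnt = 1 };

        key_insert(&t);
        printf("empty after insert: %d\n", chain_is_empty(&t));
        key_remove(&t);
        printf("empty after remove: %d\n", chain_is_empty(&t));
        return 0;
}
```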
```diff
@@ -768 +711 @@
 };
 
 static int u32_set_parms(struct net *net, struct tcf_proto *tp,
-                         unsigned long base, struct tc_u_hnode *ht,
+                         unsigned long base,
                          struct tc_u_knode *n, struct nlattr **tb,
                          struct nlattr *est, bool ovr,
                          struct netlink_ext_ack *extack)
 {
-        int err;
+        int err, ifindex = -1;
 
-        err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, extack);
+        err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, true, extack);
         if (err < 0)
                 return err;
+
+        if (tb[TCA_U32_INDEV]) {
+                ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
+                if (ifindex < 0)
+                        return -EINVAL;
+        }
 
         if (tb[TCA_U32_LINK]) {
                 u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
@@ -789 +738 @@
         }
 
         if (handle) {
-                ht_down = u32_lookup_ht(ht->tp_c, handle);
+                ht_down = u32_lookup_ht(tp->data, handle);
 
                 if (!ht_down) {
                         NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
+                        return -EINVAL;
+                }
+                if (ht_down->is_root) {
+                        NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
                         return -EINVAL;
                 }
                 ht_down->refcnt++;
@@ -809 +762 @@
                 tcf_bind_filter(tp, &n->res, base);
         }
 
-#ifdef CONFIG_NET_CLS_IND
-        if (tb[TCA_U32_INDEV]) {
-                int ret;
-                ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
-                if (ret < 0)
-                        return -EINVAL;
-                n->ifindex = ret;
-        }
-#endif
+        if (ifindex >= 0)
+                n->ifindex = ifindex;
+
         return 0;
 }
 
```
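Moving the `TCA_U32_INDEV` parsing to the top of `u32_set_parms()` validates the ifindex before any state (links, classid binding) is touched, and only commits it to `n->ifindex` at the end once nothing else can fail. A simplified sketch of that parse-first, commit-last shape — the types and the resolver here are made up for illustration; the real code uses netlink attributes and `tcf_change_indev()`:

```c
#include <stdio.h>

/* Stand-ins for the attribute table and the knode being configured. */
struct attrs { const char *indev; };
struct knode { int ifindex; };

/* Pretend resolver: a positive ifindex on success, -1 on error. */
static int resolve_indev(const char *name)
{
        return (name && name[0]) ? 2 : -1;
}

static int set_parms(const struct attrs *tb, struct knode *n)
{
        int ifindex = -1;

        /* 1. validate everything that can fail, before touching *n */
        if (tb->indev) {
                ifindex = resolve_indev(tb->indev);
                if (ifindex < 0)
                        return -1;      /* nothing was modified yet */
        }

        /* ... link/classid handling would go here ... */

        /* 2. commit only after all checks passed */
        if (ifindex >= 0)
                n->ifindex = ifindex;
        return 0;
}

int main(void)
{
        struct attrs tb = { .indev = "eth0" };
        struct knode n = { .ifindex = 0 };

        printf("ret=%d ifindex=%d\n", set_parms(&tb, &n), n.ifindex);
        return 0;
}
```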
```diff
@@ -848 +795 @@
         rcu_assign_pointer(*ins, n);
 }
 
-static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
+static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
                                          struct tc_u_knode *n)
 {
         struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
         struct tc_u32_sel *s = &n->sel;
         struct tc_u_knode *new;
 
-        new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
-                      GFP_KERNEL);
-
+        new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL);
         if (!new)
                 return NULL;
 
@@ -865 +810 @@
         new->handle = n->handle;
         RCU_INIT_POINTER(new->ht_up, n->ht_up);
 
-#ifdef CONFIG_NET_CLS_IND
         new->ifindex = n->ifindex;
-#endif
         new->fshift = n->fshift;
-        new->res = n->res;
         new->flags = n->flags;
         RCU_INIT_POINTER(new->ht_down, ht);
-
-        /* bump reference count as long as we hold pointer to structure */
-        if (ht)
-                ht->refcnt++;
 
 #ifdef CONFIG_CLS_U32_PERF
         /* Statistics may be incremented by readers during update
@@ -891 +829 @@
         /* Similarly success statistics must be moved as pointers */
         new->pcpu_success = n->pcpu_success;
 #endif
-        new->tp = tp;
-        memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
+        memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));
 
-        if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
+        if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
                 kfree(new);
                 return NULL;
         }
+
+        /* bump reference count as long as we hold pointer to structure */
+        if (ht)
+                ht->refcnt++;
 
         return new;
 }
 
 static int u32_change(struct net *net, struct sk_buff *in_skb,
                       struct tcf_proto *tp, unsigned long base, u32 handle,
-                      struct nlattr **tca, void **arg, bool ovr,
+                      struct nlattr **tca, void **arg, bool ovr, bool rtnl_held,
                       struct netlink_ext_ack *extack)
 {
         struct tc_u_common *tp_c = tp->data;
@@ -916 +857 @@
         u32 htid, flags = 0;
         size_t sel_size;
         int err;
-#ifdef CONFIG_CLS_U32_PERF
-        size_t size;
-#endif
 
         if (!opt) {
                 if (handle) {
@@ -929 +867 @@
                 }
         }
 
-        err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, extack);
+        err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
+                                          extack);
         if (err < 0)
                 return err;
 
@@ -956 +895 @@
                         return -EINVAL;
                 }
 
-                new = u32_init_knode(tp, n);
+                new = u32_init_knode(net, tp, n);
                 if (!new)
                         return -ENOMEM;
 
-                err = u32_set_parms(net, tp, base,
-                                    rtnl_dereference(n->ht_up), new, tb,
+                err = u32_set_parms(net, tp, base, new, tb,
                                     tca[TCA_RATE], ovr, extack);
 
                 if (err) {
-                        u32_destroy_key(tp, new, false);
+                        __u32_destroy_key(new);
                         return err;
                 }
 
                 err = u32_replace_hw_knode(tp, new, flags, extack);
                 if (err) {
-                        u32_destroy_key(tp, new, false);
+                        __u32_destroy_key(new);
                         return err;
                 }
 
@@ -988 +926 @@
         if (tb[TCA_U32_DIVISOR]) {
                 unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
 
-                if (--divisor > 0x100) {
+                if (!is_power_of_2(divisor)) {
+                        NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
+                        return -EINVAL;
+                }
+                if (divisor-- > 0x100) {
                         NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
                         return -EINVAL;
                 }
```
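The new `is_power_of_2(divisor)` check matches how u32 picks a bucket: what gets stored is `divisor - 1` (note the post-decrement above), and the classifier ANDs that value with the folded key, so it only covers every bucket when the user-supplied divisor is a power of two. A standalone illustration of the fold-and-mask step, with made-up key and mask values — the fold mirrors `u32_hash_fold()` and the `ffs(hmask) - 1` shift used for `n->fshift` later in this diff:

```c
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* ntohl(), htonl() */

/* Roughly what u32_hash_fold() does: mask the key, convert to host order,
 * shift the masked field down to bit 0. */
static unsigned int hash_fold(uint32_t key_be, uint32_t hmask_be, unsigned int fshift)
{
        return ntohl(key_be & hmask_be) >> fshift;
}

int main(void)
{
        unsigned int user_divisor = 8;          /* must be a power of two */
        unsigned int stored = user_divisor - 1; /* what ht->divisor ends up holding */
        uint32_t hmask = htonl(0x0000e000);     /* made-up 3-bit hash mask */
        unsigned int fshift = 13;               /* ffs(0x0000e000) - 1 */
        uint32_t key = htonl(0x0000a000);       /* made-up key field */

        unsigned int bucket = stored & hash_fold(key, hmask, fshift);

        printf("bucket %u of %u\n", bucket, user_divisor);
        return 0;
}
```

If the divisor were not a power of two, `stored` would not be an all-ones mask and some buckets could never be selected, which is what the new check rejects up front.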
```diff
@@ -996 +938 @@
                 NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
                 return -EINVAL;
         }
-        ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
+        ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL);
         if (ht == NULL)
                 return -ENOBUFS;
         if (handle == 0) {
@@ -1013 +955 @@
                         return err;
                 }
         }
-        ht->tp_c = tp_c;
         ht->refcnt = 1;
         ht->divisor = divisor;
         ht->handle = handle;
@@ -1057 +998 @@
                 return -EINVAL;
         }
 
+        /* At this point, we need to derive the new handle that will be used to
+         * uniquely map the identity of this table match entry. The
+         * identity of the entry that we need to construct is 32 bits made of:
+         * htid(12b):bucketid(8b):node/entryid(12b)
+         *
+         * At this point _we have the table(ht)_ in which we will insert this
+         * entry. We carry the table's id in variable "htid".
+         * Note that earlier code picked the ht selection either by a) the user
+         * providing the htid specified via TCA_U32_HASH attribute or b) when
+         * no such attribute is passed then the root ht, is default to at ID
+         * 0x[800][00][000]. Rule: the root table has a single bucket with ID 0.
+         * If OTOH the user passed us the htid, they may also pass a bucketid of
+         * choice. 0 is fine. For example a user htid is 0x[600][01][000] it is
+         * indicating hash bucketid of 1. Rule: the entry/node ID _cannot_ be
+         * passed via the htid, so even if it was non-zero it will be ignored.
+         *
+         * We may also have a handle, if the user passed one. The handle also
+         * carries the same addressing of htid(12b):bucketid(8b):node/entryid(12b).
+         * Rule: the bucketid on the handle is ignored even if one was passed;
+         * rather the value on "htid" is always assumed to be the bucketid.
+         */
         if (handle) {
+                /* Rule: The htid from handle and tableid from htid must match */
                 if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
                         NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
                         return -EINVAL;
                 }
-                handle = htid | TC_U32_NODE(handle);
-                err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
-                                    GFP_KERNEL);
-                if (err)
-                        return err;
-        } else
+                /* Ok, so far we have a valid htid(12b):bucketid(8b) but we
+                 * need to finalize the table entry identification with the last
+                 * part - the node/entryid(12b)). Rule: Nodeid _cannot be 0_ for
+                 * entries. Rule: nodeid of 0 is reserved only for tables(see
+                 * earlier code which processes TC_U32_DIVISOR attribute).
+                 * Rule: The nodeid can only be derived from the handle (and not
+                 * htid).
+                 * Rule: if the handle specified zero for the node id example
+                 * 0x60000000, then pick a new nodeid from the pool of IDs
+                 * this hash table has been allocating from.
+                 * If OTOH it is specified (i.e for example the user passed a
+                 * handle such as 0x60000123), then we use it generate our final
+                 * handle which is used to uniquely identify the match entry.
+                 */
+                if (!TC_U32_NODE(handle)) {
+                        handle = gen_new_kid(ht, htid);
+                } else {
+                        handle = htid | TC_U32_NODE(handle);
+                        err = idr_alloc_u32(&ht->handle_idr, NULL, &handle,
+                                            handle, GFP_KERNEL);
+                        if (err)
+                                return err;
+                }
+        } else {
+                /* The user did not give us a handle; lets just generate one
+                 * from the table's pool of nodeids.
+                 */
                 handle = gen_new_kid(ht, htid);
+        }
 
         if (tb[TCA_U32_SEL] == NULL) {
                 NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
```
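The comment block above describes the 12:8:12 split of a u32 handle. The UAPI macros from `include/uapi/linux/pkt_cls.h` implement exactly that split, and a few lines of userspace code make the comment's examples concrete (0x80000000 for the root table, plus the hypothetical user handles 0x60001000 and 0x60000123 that the comment itself mentions):

```c
#include <stdio.h>

/* From include/uapi/linux/pkt_cls.h */
#define TC_U32_HTID(h)  ((h) & 0xFFF00000)
#define TC_U32_HASH(h)  (((h) >> 12) & 0xFF)
#define TC_U32_NODE(h)  ((h) & 0xFFF)

static void show(unsigned int handle)
{
        printf("%08x -> htid %03x, bucket %02x, node %03x\n",
               handle, TC_U32_HTID(handle) >> 20, TC_U32_HASH(handle),
               TC_U32_NODE(handle));
}

int main(void)
{
        show(0x80000000);   /* root table: htid 0x800, bucket 0, node 0 */
        show(0x60001000);   /* "0x[600][01][000]": table 0x600, bucket 1, no node */
        show(0x60000123);   /* node id 0x123 supplied by the user in the handle */
        return 0;
}
```

The new code path above simply fills in the missing 12-bit node id: it keeps a user-supplied node id (reserving it in the table's IDR) and otherwise asks `gen_new_kid()` for a fresh one.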
```diff
@@ -1083 +1068 @@
                 goto erridr;
         }
 
-        n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
+        n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL);
         if (n == NULL) {
                 err = -ENOBUFS;
                 goto erridr;
         }
 
 #ifdef CONFIG_CLS_U32_PERF
-        size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
-        n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
+        n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys),
+                               __alignof__(struct tc_u32_pcnt));
         if (!n->pf) {
                 err = -ENOBUFS;
                 goto errfree;
@@ -1103 +1088 @@
         n->handle = handle;
         n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
         n->flags = flags;
-        n->tp = tp;
 
-        err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
+        err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
         if (err < 0)
                 goto errout;
 
@@ -1125 +1109 @@
         }
 #endif
 
-        err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr,
+        err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], ovr,
                             extack);
         if (err == 0) {
                 struct tc_u_knode __rcu **ins;
@@ -1146 +1130 @@
 
                 RCU_INIT_POINTER(n->next, pins);
                 rcu_assign_pointer(*ins, n);
+                tp_c->knodes++;
                 *arg = n;
                 return 0;
         }
@@ -1167 +1152 @@
         return err;
 }
 
-static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+                     bool rtnl_held)
 {
         struct tc_u_common *tp_c = tp->data;
         struct tc_u_hnode *ht;
@@ -1208 +1194 @@
 }
 
 static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
-                               bool add, tc_setup_cb_t *cb, void *cb_priv,
+                               bool add, flow_setup_cb_t *cb, void *cb_priv,
                                struct netlink_ext_ack *extack)
 {
         struct tc_cls_u32_offload cls_u32 = {};
@@ -1228 +1214 @@
 }
 
 static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
-                               bool add, tc_setup_cb_t *cb, void *cb_priv,
+                               bool add, flow_setup_cb_t *cb, void *cb_priv,
                                struct netlink_ext_ack *extack)
 {
         struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
@@ -1251 +1237 @@
                 cls_u32.knode.mask = 0;
 #endif
                 cls_u32.knode.sel = &n->sel;
+                cls_u32.knode.res = &n->res;
                 cls_u32.knode.exts = &n->exts;
                 if (n->ht_down)
                         cls_u32.knode.link_handle = ht->handle;
         }
 
-        err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
-        if (err) {
-                if (add && tc_skip_sw(n->flags))
-                        return err;
-                return 0;
-        }
-
-        tc_cls_offload_cnt_update(block, &n->in_hw_count, &n->flags, add);
+        err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
+                                    &cls_u32, cb_priv, &n->flags,
+                                    &n->in_hw_count);
+        if (err)
+                return err;
 
         return 0;
 }
 
-static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
                          void *cb_priv, struct netlink_ext_ack *extack)
 {
         struct tc_u_common *tp_c = tp->data;
@@ -1329 +1313 @@
 }
 
 static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
-                    struct sk_buff *skb, struct tcmsg *t)
+                    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
 {
         struct tc_u_knode *n = fh;
         struct tc_u_hnode *ht_up, *ht_down;
@@ -1340 +1324 @@
 
         t->tcm_handle = n->handle;
 
-        nest = nla_nest_start(skb, TCA_OPTIONS);
+        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
         if (nest == NULL)
                 goto nla_put_failure;
 
@@ -1356 +1340 @@
         int cpu;
 #endif
 
-        if (nla_put(skb, TCA_U32_SEL,
-                    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
+        if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys),
                     &n->sel))
                 goto nla_put_failure;
 
@@ -1400 +1383 @@
         if (tcf_exts_dump(skb, &n->exts) < 0)
                 goto nla_put_failure;
 
-#ifdef CONFIG_NET_CLS_IND
         if (n->ifindex) {
                 struct net_device *dev;
                 dev = __dev_get_by_index(net, n->ifindex);
                 if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
                         goto nla_put_failure;
         }
-#endif
 #ifdef CONFIG_CLS_U32_PERF
-        gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
-                      n->sel.nkeys * sizeof(u64),
-                      GFP_KERNEL);
+        gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL);
         if (!gpf)
                 goto nla_put_failure;
 
@@ -1425 +1404 @@
                         gpf->kcnts[i] += pf->kcnts[i];
         }
 
-        if (nla_put_64bit(skb, TCA_U32_PCNT,
-                          sizeof(struct tc_u32_pcnt) +
-                          n->sel.nkeys * sizeof(u64),
+        if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys),
                           gpf, TCA_U32_PAD)) {
                 kfree(gpf);
                 goto nla_put_failure;
@@ -1471 +1448 @@
 #ifdef CONFIG_CLS_U32_PERF
         pr_info(" Performance counters on\n");
 #endif
-#ifdef CONFIG_NET_CLS_IND
         pr_info(" input device check on\n");
-#endif
 #ifdef CONFIG_NET_CLS_ACT
         pr_info(" Actions configured\n");
 #endif
```