```diff
@@ -59,7 +59,7 @@
 	rwlock_t lock;
 	atomic_t nfree; /* number of free entries */
 	struct l2t_entry *rover; /* starting point for next allocation */
-	struct l2t_entry l2tab[0]; /* MUST BE LAST */
+	struct l2t_entry l2tab[]; /* MUST BE LAST */
 };
 
 static inline unsigned int vlan_prio(const struct l2t_entry *e)
```
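The `l2tab[0]` member is the old GCC zero-length-array idiom for a trailing variable-size array; the patch converts it to a C99 flexible array member, which has the same layout but makes clear that the array's extent is not part of `sizeof`. A minimal sketch of the idiom with generic names (not taken from this driver); the matching allocation-size change appears in the `kvzalloc()` hunk further down:

```c
#include <linux/types.h>

struct example_entry {
	u32 key;
	u32 val;
};

struct example_table {
	unsigned int nentries;
	struct example_entry entries[];	/* flexible array member, MUST BE LAST */
};

/* sizeof(struct example_table) covers only the fixed header; storage for
 * entries[] must be added explicitly at allocation time.
 */
```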
```diff
@@ -231,7 +231,7 @@
 		if (e->state == L2T_STATE_STALE)
 			e->state = L2T_STATE_VALID;
 		spin_unlock_bh(&e->lock);
-		/* fall through */
+		fallthrough;
 	case L2T_STATE_VALID: /* fast-path, send the packet on */
 		return t4_ofld_send(adap, skb);
 	case L2T_STATE_RESOLVING:
```
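`fallthrough;` is the kernel pseudo-keyword from `include/linux/compiler_attributes.h`: where the compiler supports it, it expands to `__attribute__((__fallthrough__))`, so `-Wimplicit-fallthrough` can tell deliberate fall-through from a forgotten `break`, which a bare comment cannot. A minimal usage sketch with invented states (not driver code):

```c
#include <linux/compiler_attributes.h>
#include <linux/errno.h>

enum demo_state { DEMO_STALE, DEMO_VALID, DEMO_DEAD };

static int demo_send(enum demo_state s)
{
	switch (s) {
	case DEMO_STALE:
		/* refresh the entry, then deliberately continue below */
		fallthrough;
	case DEMO_VALID:
		return 0;
	default:
		return -EINVAL;
	}
}
```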
```diff
@@ -351,15 +351,13 @@
 static void _t4_l2e_free(struct l2t_entry *e)
 {
 	struct l2t_data *d;
-	struct sk_buff *skb;
 
 	if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
 		if (e->neigh) {
 			neigh_release(e->neigh);
 			e->neigh = NULL;
 		}
-		while ((skb = __skb_dequeue(&e->arpq)) != NULL)
-			kfree_skb(skb);
+		__skb_queue_purge(&e->arpq);
 	}
 
 	d = container_of(e, struct l2t_data, l2tab[e->idx]);
```
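`__skb_queue_purge()` from `include/linux/skbuff.h` is essentially the loop being removed: it dequeues and frees every skb on the list without taking the queue lock, so the caller keeps exactly the same locking responsibilities as before. Its shape is roughly (paraphrased, not copied verbatim):

```c
#include <linux/skbuff.h>

/* Approximate shape of the helper the patch switches to; like the removed
 * open-coded loop, it assumes the caller serializes access to the queue.
 */
static inline void approx_skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
```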
```diff
@@ -370,7 +368,6 @@
 static void t4_l2e_free(struct l2t_entry *e)
 {
 	struct l2t_data *d;
-	struct sk_buff *skb;
 
 	spin_lock_bh(&e->lock);
 	if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
```
```diff
@@ -378,8 +375,7 @@
 			neigh_release(e->neigh);
 			e->neigh = NULL;
 		}
-		while ((skb = __skb_dequeue(&e->arpq)) != NULL)
-			kfree_skb(skb);
+		__skb_queue_purge(&e->arpq);
 	}
 	spin_unlock_bh(&e->lock);
 
```
```diff
@@ -433,10 +429,12 @@
 	else
 		lport = netdev2pinfo(physdev)->lport;
 
-	if (is_vlan_dev(neigh->dev))
+	if (is_vlan_dev(neigh->dev)) {
 		vlan = vlan_dev_vlan_id(neigh->dev);
-	else
+		vlan |= vlan_dev_get_egress_qos_mask(neigh->dev, priority);
+	} else {
 		vlan = VLAN_NONE;
+	}
 
 	write_lock_bh(&d->lock);
 	for (e = d->l2tab[hash].first; e; e = e->next)
```
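With the added braces, the entry's `vlan` field now carries a full 802.1Q TCI rather than just the VID: `vlan_dev_vlan_id()` supplies the 12-bit VLAN ID, and `vlan_dev_get_egress_qos_mask()` maps the packet priority through the VLAN device's egress QoS table into the PCP bits (bits 15:13). A hypothetical helper showing the same composition (not from the driver):

```c
#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Hypothetical helper: build a full 802.1Q TCI for a VLAN net_device,
 * mirroring what the hunk above stores in the L2T entry.
 */
static u16 example_build_tci(struct net_device *vlan_dev, u32 skb_prio)
{
	u16 tci = vlan_dev_vlan_id(vlan_dev);	/* 12-bit VID */

	/* egress QoS map: skb priority -> 3 PCP bits, already shifted
	 * to VLAN_PRIO_SHIFT (bits 15:13 of the TCI)
	 */
	tci |= vlan_dev_get_egress_qos_mask(vlan_dev, skb_prio);
	return tci;
}
```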
```diff
@@ -493,14 +491,11 @@
 		ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
 
 	if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) {
-		u32 viid = cxgb4_port_viid(dev);
-		u32 vf = FW_VIID_VIN_G(viid);
-		u32 pf = FW_VIID_PFN_G(viid);
-		u32 vld = FW_VIID_VIVLD_G(viid);
+		struct port_info *pi = (struct port_info *)netdev_priv(dev);
 
-		ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
-				FT_VNID_ID_PF_V(pf) |
-				FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
+		ntuple |= (u64)(FT_VNID_ID_VF_V(pi->vin) |
+				FT_VNID_ID_PF_V(adap->pf) |
+				FT_VNID_ID_VLD_V(pi->vivld)) << tp->vnic_shift;
 	}
 
 	return ntuple;
```
```diff
@@ -614,6 +609,7 @@
 }
 
 /**
+ * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters
  * @dev: net_device pointer
  * @vlan: VLAN Id
  * @port: Associated port
```
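The inserted line supplies the `name - summary` header that kernel-doc requires on the first line of a `/** ... */` block; without it, `scripts/kernel-doc` reports the comment as malformed. For reference, the canonical layout with placeholder names:

```c
/**
 * example_func - one-line summary of what the function does
 * @arg1: meaning of the first parameter
 * @arg2: meaning of the second parameter
 *
 * Optional longer description.
 *
 * Return: what the function returns.
 */
```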
```diff
@@ -643,7 +639,7 @@
 	if (l2t_size < L2T_MIN_HASH_BUCKETS)
 		return NULL;
 
-	d = kvzalloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry), GFP_KERNEL);
+	d = kvzalloc(struct_size(d, l2tab, l2t_size), GFP_KERNEL);
 	if (!d)
 		return NULL;
 
```
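`struct_size(d, l2tab, l2t_size)` (from `include/linux/overflow.h`) computes `sizeof(*d) + l2t_size * sizeof(d->l2tab[0])`, i.e. the same value as the open-coded expression it replaces, but saturates to `SIZE_MAX` if the arithmetic would overflow, so `kvzalloc()` fails instead of returning an undersized buffer. A sketch of the pattern, reusing the generic table from the earlier note:

```c
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/types.h>

struct example_entry {
	u32 key;
	u32 val;
};

struct example_table {
	unsigned int nentries;
	struct example_entry entries[];	/* flexible array member */
};

static struct example_table *example_table_alloc(unsigned int n)
{
	struct example_table *t;

	/* Same size as sizeof(*t) + n * sizeof(t->entries[0]), but the
	 * multiplication/addition saturate on overflow instead of wrapping.
	 */
	t = kvzalloc(struct_size(t, entries, n), GFP_KERNEL);
	if (!t)
		return NULL;

	t->nentries = n;
	return t;
}
```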
```diff
@@ -701,6 +697,17 @@
 	}
 }
 
+bool cxgb4_check_l2t_valid(struct l2t_entry *e)
+{
+	bool valid;
+
+	spin_lock(&e->lock);
+	valid = (e->state == L2T_STATE_VALID);
+	spin_unlock(&e->lock);
+	return valid;
+}
+EXPORT_SYMBOL(cxgb4_check_l2t_valid);
+
 static int l2t_seq_show(struct seq_file *seq, void *v)
 {
 	if (v == SEQ_START_TOKEN)
```
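The new `cxgb4_check_l2t_valid()` export gives upper-layer offload drivers a locked snapshot of whether an L2T entry has finished resolving. A hypothetical caller sketch (the function name and retry policy are invented for illustration; only `cxgb4_check_l2t_valid()` and `t4_ofld_send()` come from the driver, and the driver-local headers below are assumed):

```c
#include <linux/errno.h>
#include <linux/skbuff.h>

#include "cxgb4.h"	/* struct adapter, t4_ofld_send() */
#include "l2t.h"	/* struct l2t_entry, cxgb4_check_l2t_valid() */

/* Hypothetical caller: only hand the packet to the offload queue once the
 * L2T entry is valid; otherwise ask the caller to retry later.
 */
static int example_ofld_xmit(struct adapter *adap, struct sk_buff *skb,
			     struct l2t_entry *e)
{
	if (!cxgb4_check_l2t_valid(e))
		return -EAGAIN;

	return t4_ofld_send(adap, skb);
}
```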
|---|