| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Copyright (C) 2015 Cavium, Inc. |
|---|
| 3 | | - * |
|---|
| 4 | | - * This program is free software; you can redistribute it and/or modify it |
|---|
| 5 | | - * under the terms of version 2 of the GNU General Public License |
|---|
| 6 | | - * as published by the Free Software Foundation. |
|---|
| 7 | 4 | */ |
|---|
| 8 | 5 | |
|---|
| 9 | 6 | #include <linux/module.h> |
|---|
| .. | .. |
|---|
| 75 | 72 | MODULE_PARM_DESC(cpi_alg, |
|---|
| 76 | 73 | "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); |
|---|
| 77 | 74 | |
|---|
| 78 | | -/* workqueue for handling kernel ndo_set_rx_mode() calls */ |
|---|
| 79 | | -static struct workqueue_struct *nicvf_rx_mode_wq; |
|---|
| 80 | | - |
|---|
| 81 | 75 | static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) |
|---|
| 82 | 76 | { |
|---|
| 83 | 77 | if (nic->sqs_mode) |
|---|
| .. | .. |
|---|
| 132 | 126 | |
|---|
| 133 | 127 | int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) |
|---|
| 134 | 128 | { |
|---|
| 135 | | - int timeout = NIC_MBOX_MSG_TIMEOUT; |
|---|
| 136 | | - int sleep = 10; |
|---|
| 129 | + unsigned long timeout; |
|---|
| 130 | + int ret = 0; |
|---|
| 131 | + |
|---|
| 132 | + mutex_lock(&nic->rx_mode_mtx); |
|---|
| 137 | 133 | |
|---|
| 138 | 134 | nic->pf_acked = false; |
|---|
| 139 | 135 | nic->pf_nacked = false; |
|---|
| 140 | 136 | |
|---|
| 141 | 137 | nicvf_write_to_mbx(nic, mbx); |
|---|
| 142 | 138 | |
|---|
| 139 | + timeout = jiffies + msecs_to_jiffies(NIC_MBOX_MSG_TIMEOUT); |
|---|
| 143 | 140 | /* Wait for previous message to be acked, timeout 2sec */ |
|---|
| 144 | 141 | while (!nic->pf_acked) { |
|---|
| 145 | 142 | if (nic->pf_nacked) { |
|---|
| 146 | 143 | netdev_err(nic->netdev, |
|---|
| 147 | 144 | "PF NACK to mbox msg 0x%02x from VF%d\n", |
|---|
| 148 | 145 | (mbx->msg.msg & 0xFF), nic->vf_id); |
|---|
| 149 | | - return -EINVAL; |
|---|
| 146 | + ret = -EINVAL; |
|---|
| 147 | + break; |
|---|
| 150 | 148 | } |
|---|
| 151 | | - msleep(sleep); |
|---|
| 149 | + usleep_range(8000, 10000); |
|---|
| 152 | 150 | if (nic->pf_acked) |
|---|
| 153 | 151 | break; |
|---|
| 154 | | - timeout -= sleep; |
|---|
| 155 | | - if (!timeout) { |
|---|
| 152 | + if (time_after(jiffies, timeout)) { |
|---|
| 156 | 153 | netdev_err(nic->netdev, |
|---|
| 157 | 154 | "PF didn't ACK to mbox msg 0x%02x from VF%d\n", |
|---|
| 158 | 155 | (mbx->msg.msg & 0xFF), nic->vf_id); |
|---|
| 159 | | - return -EBUSY; |
|---|
| 156 | + ret = -EBUSY; |
|---|
| 157 | + break; |
|---|
| 160 | 158 | } |
|---|
| 161 | 159 | } |
|---|
| 162 | | - return 0; |
|---|
| 160 | + mutex_unlock(&nic->rx_mode_mtx); |
|---|
| 161 | + return ret; |
|---|
| 163 | 162 | } |
|---|
| 164 | 163 | |
|---|
| 165 | 164 | /* Checks if VF is able to communicate with PF |
|---|
| .. | .. |
|---|
| 246 | 245 | break; |
|---|
| 247 | 246 | case NIC_MBOX_MSG_BGX_LINK_CHANGE: |
|---|
| 248 | 247 | nic->pf_acked = true; |
|---|
| 249 | | - nic->link_up = mbx.link_status.link_up; |
|---|
| 250 | | - nic->duplex = mbx.link_status.duplex; |
|---|
| 251 | | - nic->speed = mbx.link_status.speed; |
|---|
| 252 | | - nic->mac_type = mbx.link_status.mac_type; |
|---|
| 253 | | - if (nic->link_up) { |
|---|
| 254 | | - netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n", |
|---|
| 255 | | - nic->speed, |
|---|
| 256 | | - nic->duplex == DUPLEX_FULL ? |
|---|
| 257 | | - "Full" : "Half"); |
|---|
| 258 | | - netif_carrier_on(nic->netdev); |
|---|
| 259 | | - netif_tx_start_all_queues(nic->netdev); |
|---|
| 260 | | - } else { |
|---|
| 261 | | - netdev_info(nic->netdev, "Link is Down\n"); |
|---|
| 262 | | - netif_carrier_off(nic->netdev); |
|---|
| 263 | | - netif_tx_stop_all_queues(nic->netdev); |
|---|
| 248 | + if (nic->link_up != mbx.link_status.link_up) { |
|---|
| 249 | + nic->link_up = mbx.link_status.link_up; |
|---|
| 250 | + nic->duplex = mbx.link_status.duplex; |
|---|
| 251 | + nic->speed = mbx.link_status.speed; |
|---|
| 252 | + nic->mac_type = mbx.link_status.mac_type; |
|---|
| 253 | + if (nic->link_up) { |
|---|
| 254 | + netdev_info(nic->netdev, |
|---|
| 255 | + "Link is Up %d Mbps %s duplex\n", |
|---|
| 256 | + nic->speed, |
|---|
| 257 | + nic->duplex == DUPLEX_FULL ? |
|---|
| 258 | + "Full" : "Half"); |
|---|
| 259 | + netif_carrier_on(nic->netdev); |
|---|
| 260 | + netif_tx_start_all_queues(nic->netdev); |
|---|
| 261 | + } else { |
|---|
| 262 | + netdev_info(nic->netdev, "Link is Down\n"); |
|---|
| 263 | + netif_carrier_off(nic->netdev); |
|---|
| 264 | + netif_tx_stop_all_queues(nic->netdev); |
|---|
| 265 | + } |
|---|
| 264 | 266 | } |
|---|
| 265 | 267 | break; |
|---|
| 266 | 268 | case NIC_MBOX_MSG_ALLOC_SQS: |
|---|
| .. | .. |
|---|
| 550 | 552 | xdp_set_data_meta_invalid(&xdp); |
|---|
| 551 | 553 | xdp.data_end = xdp.data + len; |
|---|
| 552 | 554 | xdp.rxq = &rq->xdp_rxq; |
|---|
| 555 | + xdp.frame_sz = RCV_FRAG_LEN + XDP_PACKET_HEADROOM; |
|---|
| 553 | 556 | orig_data = xdp.data; |
|---|
| 554 | 557 | |
|---|
| 555 | 558 | rcu_read_lock(); |
|---|
| .. | .. |
|---|
| 591 | 594 | return true; |
|---|
| 592 | 595 | default: |
|---|
| 593 | 596 | bpf_warn_invalid_xdp_action(action); |
|---|
| 594 | | - /* fall through */ |
|---|
| 597 | + fallthrough; |
|---|
| 595 | 598 | case XDP_ABORTED: |
|---|
| 596 | 599 | trace_xdp_exception(nic->netdev, prog, action); |
|---|
| 597 | | - /* fall through */ |
|---|
| 600 | + fallthrough; |
|---|
| 598 | 601 | case XDP_DROP: |
|---|
| 599 | 602 | /* Check if it's a recycled page, if not |
|---|
| 600 | 603 | * unmap the DMA mapping. |
|---|
| .. | .. |
|---|
| 982 | 985 | * |
|---|
| 983 | 986 | * As of now only CQ errors are handled |
|---|
| 984 | 987 | */ |
|---|
| 985 | | -static void nicvf_handle_qs_err(unsigned long data) |
|---|
| 988 | +static void nicvf_handle_qs_err(struct tasklet_struct *t) |
|---|
| 986 | 989 | { |
|---|
| 987 | | - struct nicvf *nic = (struct nicvf *)data; |
|---|
| 990 | + struct nicvf *nic = from_tasklet(nic, t, qs_err_task); |
|---|
| 988 | 991 | struct queue_set *qs = nic->qs; |
|---|
| 989 | 992 | int qidx; |
|---|
| 990 | 993 | u64 status; |
|---|
| .. | .. |
|---|
| 1329 | 1332 | struct nicvf_cq_poll *cq_poll = NULL; |
|---|
| 1330 | 1333 | union nic_mbx mbx = {}; |
|---|
| 1331 | 1334 | |
|---|
| 1335 | + /* wait till all queued set_rx_mode tasks complete */ |
|---|
| 1336 | + if (nic->nicvf_rx_mode_wq) { |
|---|
| 1337 | + cancel_delayed_work_sync(&nic->link_change_work); |
|---|
| 1338 | + drain_workqueue(nic->nicvf_rx_mode_wq); |
|---|
| 1339 | + } |
|---|
| 1340 | + |
|---|
| 1332 | 1341 | mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; |
|---|
| 1333 | 1342 | nicvf_send_msg_to_pf(nic, &mbx); |
|---|
| 1334 | 1343 | |
|---|
| .. | .. |
|---|
| 1428 | 1437 | return nicvf_send_msg_to_pf(nic, &mbx); |
|---|
| 1429 | 1438 | } |
|---|
| 1430 | 1439 | |
|---|
| 1440 | +static void nicvf_link_status_check_task(struct work_struct *work_arg) |
|---|
| 1441 | +{ |
|---|
| 1442 | + struct nicvf *nic = container_of(work_arg, |
|---|
| 1443 | + struct nicvf, |
|---|
| 1444 | + link_change_work.work); |
|---|
| 1445 | + union nic_mbx mbx = {}; |
|---|
| 1446 | + mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; |
|---|
| 1447 | + nicvf_send_msg_to_pf(nic, &mbx); |
|---|
| 1448 | + queue_delayed_work(nic->nicvf_rx_mode_wq, |
|---|
| 1449 | + &nic->link_change_work, 2 * HZ); |
|---|
| 1450 | +} |
|---|
| 1451 | + |
|---|
| 1431 | 1452 | int nicvf_open(struct net_device *netdev) |
|---|
| 1432 | 1453 | { |
|---|
| 1433 | 1454 | int cpu, err, qidx; |
|---|
| 1434 | 1455 | struct nicvf *nic = netdev_priv(netdev); |
|---|
| 1435 | 1456 | struct queue_set *qs = nic->qs; |
|---|
| 1436 | 1457 | struct nicvf_cq_poll *cq_poll = NULL; |
|---|
| 1458 | + |
|---|
| 1459 | + /* wait till all queued set_rx_mode tasks complete if any */ |
|---|
| 1460 | + if (nic->nicvf_rx_mode_wq) |
|---|
| 1461 | + drain_workqueue(nic->nicvf_rx_mode_wq); |
|---|
| 1437 | 1462 | |
|---|
| 1438 | 1463 | netif_carrier_off(netdev); |
|---|
| 1439 | 1464 | |
|---|
| .. | .. |
|---|
| 1468 | 1493 | } |
|---|
| 1469 | 1494 | |
|---|
| 1470 | 1495 | /* Init tasklet for handling Qset err interrupt */ |
|---|
| 1471 | | - tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err, |
|---|
| 1472 | | - (unsigned long)nic); |
|---|
| 1496 | + tasklet_setup(&nic->qs_err_task, nicvf_handle_qs_err); |
|---|
| 1473 | 1497 | |
|---|
| 1474 | 1498 | /* Init RBDR tasklet which will refill RBDR */ |
|---|
| 1475 | | - tasklet_init(&nic->rbdr_task, nicvf_rbdr_task, |
|---|
| 1476 | | - (unsigned long)nic); |
|---|
| 1499 | + tasklet_setup(&nic->rbdr_task, nicvf_rbdr_task); |
|---|
| 1477 | 1500 | INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work); |
|---|
| 1478 | 1501 | |
|---|
| 1479 | 1502 | /* Configure CPI algorithm */ |
|---|
| .. | .. |
|---|
| 1530 | 1553 | |
|---|
| 1531 | 1554 | /* Send VF config done msg to PF */ |
|---|
| 1532 | 1555 | nicvf_send_cfg_done(nic); |
|---|
| 1556 | + |
|---|
| 1557 | + if (nic->nicvf_rx_mode_wq) { |
|---|
| 1558 | + INIT_DELAYED_WORK(&nic->link_change_work, |
|---|
| 1559 | + nicvf_link_status_check_task); |
|---|
| 1560 | + queue_delayed_work(nic->nicvf_rx_mode_wq, |
|---|
| 1561 | + &nic->link_change_work, 0); |
|---|
| 1562 | + } |
|---|
| 1533 | 1563 | |
|---|
| 1534 | 1564 | return 0; |
|---|
| 1535 | 1565 | cleanup: |
|---|
| .. | .. |
|---|
| 1709 | 1739 | |
|---|
| 1710 | 1740 | } |
|---|
| 1711 | 1741 | |
|---|
| 1712 | | -static void nicvf_tx_timeout(struct net_device *dev) |
|---|
| 1742 | +static void nicvf_tx_timeout(struct net_device *dev, unsigned int txqueue) |
|---|
| 1713 | 1743 | { |
|---|
| 1714 | 1744 | struct nicvf *nic = netdev_priv(dev); |
|---|
| 1715 | 1745 | |
|---|
| .. | .. |
|---|
| 1844 | 1874 | |
|---|
| 1845 | 1875 | if (nic->xdp_prog) { |
|---|
| 1846 | 1876 | /* Attach BPF program */ |
|---|
| 1847 | | - nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1); |
|---|
| 1848 | | - if (!IS_ERR(nic->xdp_prog)) { |
|---|
| 1849 | | - bpf_attached = true; |
|---|
| 1850 | | - } else { |
|---|
| 1851 | | - ret = PTR_ERR(nic->xdp_prog); |
|---|
| 1852 | | - nic->xdp_prog = NULL; |
|---|
| 1853 | | - } |
|---|
| 1877 | + bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1); |
|---|
| 1878 | + bpf_attached = true; |
|---|
| 1854 | 1879 | } |
|---|
| 1855 | 1880 | |
|---|
| 1856 | 1881 | /* Calculate Tx queues needed for XDP and network stack */ |
|---|
| .. | .. |
|---|
| 1879 | 1904 | switch (xdp->command) { |
|---|
| 1880 | 1905 | case XDP_SETUP_PROG: |
|---|
| 1881 | 1906 | return nicvf_xdp_setup(nic, xdp->prog); |
|---|
| 1882 | | - case XDP_QUERY_PROG: |
|---|
| 1883 | | - xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0; |
|---|
| 1884 | | - return 0; |
|---|
| 1885 | 1907 | default: |
|---|
| 1886 | 1908 | return -EINVAL; |
|---|
| 1887 | 1909 | } |
|---|
| .. | .. |
|---|
| 1976 | 1998 | * its own LMAC to the filter to accept packets for it. |
|---|
| 1977 | 1999 | */ |
|---|
| 1978 | 2000 | mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; |
|---|
| 1979 | | - mbx.xcast.data.mac = 0; |
|---|
| 2001 | + mbx.xcast.mac = 0; |
|---|
| 1980 | 2002 | if (nicvf_send_msg_to_pf(nic, &mbx) < 0) |
|---|
| 1981 | 2003 | goto free_mc; |
|---|
| 1982 | 2004 | } |
|---|
| .. | .. |
|---|
| 1986 | 2008 | /* now go through kernel list of MACs and add them one by one */ |
|---|
| 1987 | 2009 | for (idx = 0; idx < mc_addrs->count; idx++) { |
|---|
| 1988 | 2010 | mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; |
|---|
| 1989 | | - mbx.xcast.data.mac = mc_addrs->mc[idx]; |
|---|
| 2011 | + mbx.xcast.mac = mc_addrs->mc[idx]; |
|---|
| 1990 | 2012 | if (nicvf_send_msg_to_pf(nic, &mbx) < 0) |
|---|
| 1991 | 2013 | goto free_mc; |
|---|
| 1992 | 2014 | } |
|---|
| .. | .. |
|---|
| 1994 | 2016 | |
|---|
| 1995 | 2017 | /* and finally set rx mode for PF accordingly */ |
|---|
| 1996 | 2018 | mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; |
|---|
| 1997 | | - mbx.xcast.data.mode = mode; |
|---|
| 2019 | + mbx.xcast.mode = mode; |
|---|
| 1998 | 2020 | |
|---|
| 1999 | 2021 | nicvf_send_msg_to_pf(nic, &mbx); |
|---|
| 2000 | 2022 | free_mc: |
|---|
| .. | .. |
|---|
| 2004 | 2026 | static void nicvf_set_rx_mode_task(struct work_struct *work_arg) |
|---|
| 2005 | 2027 | { |
|---|
| 2006 | 2028 | struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work, |
|---|
| 2007 | | - work.work); |
|---|
| 2029 | + work); |
|---|
| 2008 | 2030 | struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work); |
|---|
| 2009 | 2031 | u8 mode; |
|---|
| 2010 | 2032 | struct xcast_addr_list *mc; |
|---|
| .. | .. |
|---|
| 2043 | 2065 | mode |= BGX_XCAST_MCAST_FILTER; |
|---|
| 2044 | 2066 | /* here we need to copy mc addrs */ |
|---|
| 2045 | 2067 | if (netdev_mc_count(netdev)) { |
|---|
| 2046 | | - mc_list = kmalloc(offsetof(typeof(*mc_list), |
|---|
| 2047 | | - mc[netdev_mc_count(netdev)]), |
|---|
| 2068 | + mc_list = kmalloc(struct_size(mc_list, mc, |
|---|
| 2069 | + netdev_mc_count(netdev)), |
|---|
| 2048 | 2070 | GFP_ATOMIC); |
|---|
| 2049 | 2071 | if (unlikely(!mc_list)) |
|---|
| 2050 | 2072 | return; |
|---|
| .. | .. |
|---|
| 2061 | 2083 | kfree(nic->rx_mode_work.mc); |
|---|
| 2062 | 2084 | nic->rx_mode_work.mc = mc_list; |
|---|
| 2063 | 2085 | nic->rx_mode_work.mode = mode; |
|---|
| 2064 | | - queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0); |
|---|
| 2086 | + queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work); |
|---|
| 2065 | 2087 | spin_unlock(&nic->rx_mode_wq_lock); |
|---|
| 2066 | 2088 | } |
|---|
| 2067 | 2089 | |
|---|
| .. | .. |
|---|
| 2153 | 2175 | nic->max_queues *= 2; |
|---|
| 2154 | 2176 | nic->ptp_clock = ptp_clock; |
|---|
| 2155 | 2177 | |
|---|
| 2178 | + /* Initialize mutex that serializes usage of VF's mailbox */ |
|---|
| 2179 | + mutex_init(&nic->rx_mode_mtx); |
|---|
| 2180 | + |
|---|
| 2156 | 2181 | /* MAP VF's configuration registers */ |
|---|
| 2157 | 2182 | nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); |
|---|
| 2158 | 2183 | if (!nic->reg_base) { |
|---|
| .. | .. |
|---|
| 2218 | 2243 | |
|---|
| 2219 | 2244 | INIT_WORK(&nic->reset_task, nicvf_reset_task); |
|---|
| 2220 | 2245 | |
|---|
| 2221 | | - INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); |
|---|
| 2246 | + nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d", |
|---|
| 2247 | + WQ_MEM_RECLAIM, |
|---|
| 2248 | + nic->vf_id); |
|---|
| 2249 | + if (!nic->nicvf_rx_mode_wq) { |
|---|
| 2250 | + err = -ENOMEM; |
|---|
| 2251 | + dev_err(dev, "Failed to allocate work queue\n"); |
|---|
| 2252 | + goto err_unregister_interrupts; |
|---|
| 2253 | + } |
|---|
| 2254 | + |
|---|
| 2255 | + INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); |
|---|
| 2222 | 2256 | spin_lock_init(&nic->rx_mode_wq_lock); |
|---|
| 2223 | 2257 | |
|---|
| 2224 | 2258 | err = register_netdev(netdev); |
|---|
| 2225 | 2259 | if (err) { |
|---|
| 2226 | 2260 | dev_err(dev, "Failed to register netdevice\n"); |
|---|
| 2227 | | - goto err_unregister_interrupts; |
|---|
| 2261 | + goto err_destroy_workqueue; |
|---|
| 2228 | 2262 | } |
|---|
| 2229 | 2263 | |
|---|
| 2230 | 2264 | nic->msg_enable = debug; |
|---|
| .. | .. |
|---|
| 2233 | 2267 | |
|---|
| 2234 | 2268 | return 0; |
|---|
| 2235 | 2269 | |
|---|
| 2270 | +err_destroy_workqueue: |
|---|
| 2271 | + destroy_workqueue(nic->nicvf_rx_mode_wq); |
|---|
| 2236 | 2272 | err_unregister_interrupts: |
|---|
| 2237 | 2273 | nicvf_unregister_interrupts(nic); |
|---|
| 2238 | 2274 | err_free_netdev: |
|---|
| .. | .. |
|---|
| 2259 | 2295 | nic = netdev_priv(netdev); |
|---|
| 2260 | 2296 | pnetdev = nic->pnicvf->netdev; |
|---|
| 2261 | 2297 | |
|---|
| 2262 | | - cancel_delayed_work_sync(&nic->rx_mode_work.work); |
|---|
| 2263 | | - |
|---|
| 2264 | 2298 | /* Check if this Qset is assigned to different VF. |
|---|
| 2265 | 2299 | * If yes, clean primary and all secondary Qsets. |
|---|
| 2266 | 2300 | */ |
|---|
| 2267 | 2301 | if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED)) |
|---|
| 2268 | 2302 | unregister_netdev(pnetdev); |
|---|
| 2303 | + if (nic->nicvf_rx_mode_wq) { |
|---|
| 2304 | + destroy_workqueue(nic->nicvf_rx_mode_wq); |
|---|
| 2305 | + nic->nicvf_rx_mode_wq = NULL; |
|---|
| 2306 | + } |
|---|
| 2269 | 2307 | nicvf_unregister_interrupts(nic); |
|---|
| 2270 | 2308 | pci_set_drvdata(pdev, NULL); |
|---|
| 2271 | 2309 | if (nic->drv_stats) |
|---|
| .. | .. |
|---|
| 2292 | 2330 | static int __init nicvf_init_module(void) |
|---|
| 2293 | 2331 | { |
|---|
| 2294 | 2332 | pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); |
|---|
| 2295 | | - nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic", |
|---|
| 2296 | | - WQ_MEM_RECLAIM); |
|---|
| 2297 | 2333 | return pci_register_driver(&nicvf_driver); |
|---|
| 2298 | 2334 | } |
|---|
| 2299 | 2335 | |
|---|
| 2300 | 2336 | static void __exit nicvf_cleanup_module(void) |
|---|
| 2301 | 2337 | { |
|---|
| 2302 | | - if (nicvf_rx_mode_wq) { |
|---|
| 2303 | | - destroy_workqueue(nicvf_rx_mode_wq); |
|---|
| 2304 | | - nicvf_rx_mode_wq = NULL; |
|---|
| 2305 | | - } |
|---|
| 2306 | 2338 | pci_unregister_driver(&nicvf_driver); |
|---|
| 2307 | 2339 | } |
|---|
| 2308 | 2340 | |
|---|