forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/cavium/thunder/nicvf_main.c
....@@ -1,9 +1,6 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * Copyright (C) 2015 Cavium, Inc.
3
- *
4
- * This program is free software; you can redistribute it and/or modify it
5
- * under the terms of version 2 of the GNU General Public License
6
- * as published by the Free Software Foundation.
74 */
85
96 #include <linux/module.h>
....@@ -75,9 +72,6 @@
7572 MODULE_PARM_DESC(cpi_alg,
7673 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
7774
78
-/* workqueue for handling kernel ndo_set_rx_mode() calls */
79
-static struct workqueue_struct *nicvf_rx_mode_wq;
80
-
8175 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
8276 {
8377 if (nic->sqs_mode)
....@@ -132,34 +126,39 @@
132126
133127 int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
134128 {
135
- int timeout = NIC_MBOX_MSG_TIMEOUT;
136
- int sleep = 10;
129
+ unsigned long timeout;
130
+ int ret = 0;
131
+
132
+ mutex_lock(&nic->rx_mode_mtx);
137133
138134 nic->pf_acked = false;
139135 nic->pf_nacked = false;
140136
141137 nicvf_write_to_mbx(nic, mbx);
142138
139
+ timeout = jiffies + msecs_to_jiffies(NIC_MBOX_MSG_TIMEOUT);
143140 /* Wait for previous message to be acked, timeout 2sec */
144141 while (!nic->pf_acked) {
145142 if (nic->pf_nacked) {
146143 netdev_err(nic->netdev,
147144 "PF NACK to mbox msg 0x%02x from VF%d\n",
148145 (mbx->msg.msg & 0xFF), nic->vf_id);
149
- return -EINVAL;
146
+ ret = -EINVAL;
147
+ break;
150148 }
151
- msleep(sleep);
149
+ usleep_range(8000, 10000);
152150 if (nic->pf_acked)
153151 break;
154
- timeout -= sleep;
155
- if (!timeout) {
152
+ if (time_after(jiffies, timeout)) {
156153 netdev_err(nic->netdev,
157154 "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
158155 (mbx->msg.msg & 0xFF), nic->vf_id);
159
- return -EBUSY;
156
+ ret = -EBUSY;
157
+ break;
160158 }
161159 }
162
- return 0;
160
+ mutex_unlock(&nic->rx_mode_mtx);
161
+ return ret;
163162 }
164163
165164 /* Checks if VF is able to communicate with PF
....@@ -246,21 +245,24 @@
246245 break;
247246 case NIC_MBOX_MSG_BGX_LINK_CHANGE:
248247 nic->pf_acked = true;
249
- nic->link_up = mbx.link_status.link_up;
250
- nic->duplex = mbx.link_status.duplex;
251
- nic->speed = mbx.link_status.speed;
252
- nic->mac_type = mbx.link_status.mac_type;
253
- if (nic->link_up) {
254
- netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n",
255
- nic->speed,
256
- nic->duplex == DUPLEX_FULL ?
257
- "Full" : "Half");
258
- netif_carrier_on(nic->netdev);
259
- netif_tx_start_all_queues(nic->netdev);
260
- } else {
261
- netdev_info(nic->netdev, "Link is Down\n");
262
- netif_carrier_off(nic->netdev);
263
- netif_tx_stop_all_queues(nic->netdev);
248
+ if (nic->link_up != mbx.link_status.link_up) {
249
+ nic->link_up = mbx.link_status.link_up;
250
+ nic->duplex = mbx.link_status.duplex;
251
+ nic->speed = mbx.link_status.speed;
252
+ nic->mac_type = mbx.link_status.mac_type;
253
+ if (nic->link_up) {
254
+ netdev_info(nic->netdev,
255
+ "Link is Up %d Mbps %s duplex\n",
256
+ nic->speed,
257
+ nic->duplex == DUPLEX_FULL ?
258
+ "Full" : "Half");
259
+ netif_carrier_on(nic->netdev);
260
+ netif_tx_start_all_queues(nic->netdev);
261
+ } else {
262
+ netdev_info(nic->netdev, "Link is Down\n");
263
+ netif_carrier_off(nic->netdev);
264
+ netif_tx_stop_all_queues(nic->netdev);
265
+ }
264266 }
265267 break;
266268 case NIC_MBOX_MSG_ALLOC_SQS:
....@@ -550,6 +552,7 @@
550552 xdp_set_data_meta_invalid(&xdp);
551553 xdp.data_end = xdp.data + len;
552554 xdp.rxq = &rq->xdp_rxq;
555
+ xdp.frame_sz = RCV_FRAG_LEN + XDP_PACKET_HEADROOM;
553556 orig_data = xdp.data;
554557
555558 rcu_read_lock();
....@@ -591,10 +594,10 @@
591594 return true;
592595 default:
593596 bpf_warn_invalid_xdp_action(action);
594
- /* fall through */
597
+ fallthrough;
595598 case XDP_ABORTED:
596599 trace_xdp_exception(nic->netdev, prog, action);
597
- /* fall through */
600
+ fallthrough;
598601 case XDP_DROP:
599602 /* Check if it's a recycled page, if not
600603 * unmap the DMA mapping.
....@@ -982,9 +985,9 @@
982985 *
983986 * As of now only CQ errors are handled
984987 */
985
-static void nicvf_handle_qs_err(unsigned long data)
988
+static void nicvf_handle_qs_err(struct tasklet_struct *t)
986989 {
987
- struct nicvf *nic = (struct nicvf *)data;
990
+ struct nicvf *nic = from_tasklet(nic, t, qs_err_task);
988991 struct queue_set *qs = nic->qs;
989992 int qidx;
990993 u64 status;
....@@ -1329,6 +1332,12 @@
13291332 struct nicvf_cq_poll *cq_poll = NULL;
13301333 union nic_mbx mbx = {};
13311334
1335
+ /* wait till all queued set_rx_mode tasks complete */
1336
+ if (nic->nicvf_rx_mode_wq) {
1337
+ cancel_delayed_work_sync(&nic->link_change_work);
1338
+ drain_workqueue(nic->nicvf_rx_mode_wq);
1339
+ }
1340
+
13321341 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
13331342 nicvf_send_msg_to_pf(nic, &mbx);
13341343
....@@ -1428,12 +1437,28 @@
14281437 return nicvf_send_msg_to_pf(nic, &mbx);
14291438 }
14301439
1440
+static void nicvf_link_status_check_task(struct work_struct *work_arg)
1441
+{
1442
+ struct nicvf *nic = container_of(work_arg,
1443
+ struct nicvf,
1444
+ link_change_work.work);
1445
+ union nic_mbx mbx = {};
1446
+ mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
1447
+ nicvf_send_msg_to_pf(nic, &mbx);
1448
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
1449
+ &nic->link_change_work, 2 * HZ);
1450
+}
1451
+
14311452 int nicvf_open(struct net_device *netdev)
14321453 {
14331454 int cpu, err, qidx;
14341455 struct nicvf *nic = netdev_priv(netdev);
14351456 struct queue_set *qs = nic->qs;
14361457 struct nicvf_cq_poll *cq_poll = NULL;
1458
+
1459
+ /* wait till all queued set_rx_mode tasks complete, if any */
1460
+ if (nic->nicvf_rx_mode_wq)
1461
+ drain_workqueue(nic->nicvf_rx_mode_wq);
14371462
14381463 netif_carrier_off(netdev);
14391464
....@@ -1468,12 +1493,10 @@
14681493 }
14691494
14701495 /* Init tasklet for handling Qset err interrupt */
1471
- tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
1472
- (unsigned long)nic);
1496
+ tasklet_setup(&nic->qs_err_task, nicvf_handle_qs_err);
14731497
14741498 /* Init RBDR tasklet which will refill RBDR */
1475
- tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
1476
- (unsigned long)nic);
1499
+ tasklet_setup(&nic->rbdr_task, nicvf_rbdr_task);
14771500 INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
14781501
14791502 /* Configure CPI algorithm */
....@@ -1530,6 +1553,13 @@
15301553
15311554 /* Send VF config done msg to PF */
15321555 nicvf_send_cfg_done(nic);
1556
+
1557
+ if (nic->nicvf_rx_mode_wq) {
1558
+ INIT_DELAYED_WORK(&nic->link_change_work,
1559
+ nicvf_link_status_check_task);
1560
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
1561
+ &nic->link_change_work, 0);
1562
+ }
15331563
15341564 return 0;
15351565 cleanup:
....@@ -1709,7 +1739,7 @@
17091739
17101740 }
17111741
1712
-static void nicvf_tx_timeout(struct net_device *dev)
1742
+static void nicvf_tx_timeout(struct net_device *dev, unsigned int txqueue)
17131743 {
17141744 struct nicvf *nic = netdev_priv(dev);
17151745
....@@ -1844,13 +1874,8 @@
18441874
18451875 if (nic->xdp_prog) {
18461876 /* Attach BPF program */
1847
- nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
1848
- if (!IS_ERR(nic->xdp_prog)) {
1849
- bpf_attached = true;
1850
- } else {
1851
- ret = PTR_ERR(nic->xdp_prog);
1852
- nic->xdp_prog = NULL;
1853
- }
1877
+ bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
1878
+ bpf_attached = true;
18541879 }
18551880
18561881 /* Calculate Tx queues needed for XDP and network stack */
....@@ -1879,9 +1904,6 @@
18791904 switch (xdp->command) {
18801905 case XDP_SETUP_PROG:
18811906 return nicvf_xdp_setup(nic, xdp->prog);
1882
- case XDP_QUERY_PROG:
1883
- xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
1884
- return 0;
18851907 default:
18861908 return -EINVAL;
18871909 }
....@@ -1976,7 +1998,7 @@
19761998 * its' own LMAC to the filter to accept packets for it.
19771999 */
19782000 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
1979
- mbx.xcast.data.mac = 0;
2001
+ mbx.xcast.mac = 0;
19802002 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
19812003 goto free_mc;
19822004 }
....@@ -1986,7 +2008,7 @@
19862008 /* now go through kernel list of MACs and add them one by one */
19872009 for (idx = 0; idx < mc_addrs->count; idx++) {
19882010 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
1989
- mbx.xcast.data.mac = mc_addrs->mc[idx];
2011
+ mbx.xcast.mac = mc_addrs->mc[idx];
19902012 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
19912013 goto free_mc;
19922014 }
....@@ -1994,7 +2016,7 @@
19942016
19952017 /* and finally set rx mode for PF accordingly */
19962018 mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
1997
- mbx.xcast.data.mode = mode;
2019
+ mbx.xcast.mode = mode;
19982020
19992021 nicvf_send_msg_to_pf(nic, &mbx);
20002022 free_mc:
....@@ -2004,7 +2026,7 @@
20042026 static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
20052027 {
20062028 struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
2007
- work.work);
2029
+ work);
20082030 struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
20092031 u8 mode;
20102032 struct xcast_addr_list *mc;
....@@ -2043,8 +2065,8 @@
20432065 mode |= BGX_XCAST_MCAST_FILTER;
20442066 /* here we need to copy mc addrs */
20452067 if (netdev_mc_count(netdev)) {
2046
- mc_list = kmalloc(offsetof(typeof(*mc_list),
2047
- mc[netdev_mc_count(netdev)]),
2068
+ mc_list = kmalloc(struct_size(mc_list, mc,
2069
+ netdev_mc_count(netdev)),
20482070 GFP_ATOMIC);
20492071 if (unlikely(!mc_list))
20502072 return;
....@@ -2061,7 +2083,7 @@
20612083 kfree(nic->rx_mode_work.mc);
20622084 nic->rx_mode_work.mc = mc_list;
20632085 nic->rx_mode_work.mode = mode;
2064
- queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
2086
+ queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
20652087 spin_unlock(&nic->rx_mode_wq_lock);
20662088 }
20672089
....@@ -2153,6 +2175,9 @@
21532175 nic->max_queues *= 2;
21542176 nic->ptp_clock = ptp_clock;
21552177
2178
+ /* Initialize mutex that serializes usage of VF's mailbox */
2179
+ mutex_init(&nic->rx_mode_mtx);
2180
+
21562181 /* MAP VF's configuration registers */
21572182 nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
21582183 if (!nic->reg_base) {
....@@ -2218,13 +2243,22 @@
22182243
22192244 INIT_WORK(&nic->reset_task, nicvf_reset_task);
22202245
2221
- INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
2246
+ nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
2247
+ WQ_MEM_RECLAIM,
2248
+ nic->vf_id);
2249
+ if (!nic->nicvf_rx_mode_wq) {
2250
+ err = -ENOMEM;
2251
+ dev_err(dev, "Failed to allocate work queue\n");
2252
+ goto err_unregister_interrupts;
2253
+ }
2254
+
2255
+ INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
22222256 spin_lock_init(&nic->rx_mode_wq_lock);
22232257
22242258 err = register_netdev(netdev);
22252259 if (err) {
22262260 dev_err(dev, "Failed to register netdevice\n");
2227
- goto err_unregister_interrupts;
2261
+ goto err_destroy_workqueue;
22282262 }
22292263
22302264 nic->msg_enable = debug;
....@@ -2233,6 +2267,8 @@
22332267
22342268 return 0;
22352269
2270
+err_destroy_workqueue:
2271
+ destroy_workqueue(nic->nicvf_rx_mode_wq);
22362272 err_unregister_interrupts:
22372273 nicvf_unregister_interrupts(nic);
22382274 err_free_netdev:
....@@ -2259,13 +2295,15 @@
22592295 nic = netdev_priv(netdev);
22602296 pnetdev = nic->pnicvf->netdev;
22612297
2262
- cancel_delayed_work_sync(&nic->rx_mode_work.work);
2263
-
22642298 /* Check if this Qset is assigned to different VF.
22652299 * If yes, clean primary and all secondary Qsets.
22662300 */
22672301 if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
22682302 unregister_netdev(pnetdev);
2303
+ if (nic->nicvf_rx_mode_wq) {
2304
+ destroy_workqueue(nic->nicvf_rx_mode_wq);
2305
+ nic->nicvf_rx_mode_wq = NULL;
2306
+ }
22692307 nicvf_unregister_interrupts(nic);
22702308 pci_set_drvdata(pdev, NULL);
22712309 if (nic->drv_stats)
....@@ -2292,17 +2330,11 @@
22922330 static int __init nicvf_init_module(void)
22932331 {
22942332 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
2295
- nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
2296
- WQ_MEM_RECLAIM);
22972333 return pci_register_driver(&nicvf_driver);
22982334 }
22992335
23002336 static void __exit nicvf_cleanup_module(void)
23012337 {
2302
- if (nicvf_rx_mode_wq) {
2303
- destroy_workqueue(nicvf_rx_mode_wq);
2304
- nicvf_rx_mode_wq = NULL;
2305
- }
23062338 pci_unregister_driver(&nicvf_driver);
23072339 }
23082340