.. | .. |
4 | 4 | #include <linux/dma-mapping.h> |
5 | 5 | #include <linux/etherdevice.h> |
6 | 6 | #include <linux/interrupt.h> |
| 7 | +#ifdef CONFIG_RFS_ACCEL |
| 8 | +#include <linux/cpu_rmap.h> |
| 9 | +#endif |
7 | 10 | #include <linux/if_vlan.h> |
| 11 | +#include <linux/irq.h> |
8 | 12 | #include <linux/ip.h> |
9 | 13 | #include <linux/ipv6.h> |
10 | 14 | #include <linux/module.h> |
11 | 15 | #include <linux/pci.h> |
| 16 | +#include <linux/aer.h> |
12 | 17 | #include <linux/skbuff.h> |
13 | 18 | #include <linux/sctp.h> |
14 | | -#include <linux/vermagic.h> |
15 | 19 | #include <net/gre.h> |
| 20 | +#include <net/ip6_checksum.h> |
16 | 21 | #include <net/pkt_cls.h> |
| 22 | +#include <net/tcp.h> |
17 | 23 | #include <net/vxlan.h> |
| 24 | +#include <net/geneve.h> |
18 | 25 | |
19 | 26 | #include "hnae3.h" |
20 | 27 | #include "hns3_enet.h" |
| 28 | +/* All hns3 tracepoints are defined by the include below, which |
| 29 | + * must be included exactly once across the whole kernel with |
| 30 | + * CREATE_TRACE_POINTS defined |
| 31 | + */ |
| 32 | +#define CREATE_TRACE_POINTS |
| 33 | +#include "hns3_trace.h" |
21 | 34 | |
22 | | -static void hns3_clear_all_ring(struct hnae3_handle *h); |
23 | | -static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); |
| 35 | +#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) |
| 36 | +#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) |
| 37 | + |
| 38 | +#define hns3_rl_err(fmt, ...) \ |
| 39 | + do { \ |
| 40 | + if (net_ratelimit()) \ |
| 41 | + netdev_err(fmt, ##__VA_ARGS__); \ |
| 42 | + } while (0) |
| 43 | + |
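A quick standalone sketch (not driver code) of the two helper macros added above, so their arithmetic can be checked in userspace. The 65535-byte value for HNS3_MAX_BD_SIZE is an assumption for illustration only. Note that hns3_set_field() only ORs the value in, so it relies on the descriptor field starting out zeroed, which is presumably why the mask argument of the older hnae3_set_field() could be dropped.

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define HNS3_MAX_BD_SIZE 65535U /* assumed BD payload limit */

#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

int main(void)
{
	unsigned int desc = 0;

	/* OR a 2-bit value into bit position 5, as the driver does
	 * when building descriptor fields from a zeroed descriptor.
	 */
	hns3_set_field(desc, 5, 0x3U);
	printf("desc = 0x%x\n", desc);                        /* 0x60 */

	/* a 100000-byte linear buffer needs two BDs of 65535 bytes */
	printf("bd count = %u\n", hns3_tx_bd_count(100000U)); /* 2 */
	return 0;
}
```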
---|
| 44 | +static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); |
24 | 45 | |
25 | 46 | static const char hns3_driver_name[] = "hns3"; |
26 | | -const char hns3_driver_version[] = VERMAGIC_STRING; |
27 | 47 | static const char hns3_driver_string[] = |
28 | 48 | "Hisilicon Ethernet Network Driver for Hip08 Family"; |
29 | 49 | static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; |
30 | 50 | static struct hnae3_client client; |
31 | 51 | |
| 52 | +static int debug = -1; |
| 53 | +module_param(debug, int, 0); |
| 54 | +MODULE_PARM_DESC(debug, " Network interface message level setting"); |
| 55 | + |
| 56 | +#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ |
| 57 | + NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) |
| 58 | + |
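A userspace model of the kernel's netif_msg_init() helper (include/linux/netdevice.h), which drivers typically call as `priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);`. With the module default of `debug = -1`, the out-of-range value falls back to DEFAULT_MSG_LEVEL above. This is a sketch of the standard helper's behavior, not hns3-specific code.

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t netif_msg_init(int debug_value, uint32_t default_bits)
{
	if (debug_value < 0 || debug_value >= 32)
		return default_bits;            /* out of range: defaults */
	if (debug_value == 0)
		return 0;                       /* explicitly silent */
	return (1U << debug_value) - 1;         /* enable the low N classes */
}

int main(void)
{
	uint32_t defaults = 0x37;	/* stand-in for DEFAULT_MSG_LEVEL */

	printf("debug=-1 -> 0x%x\n", netif_msg_init(-1, defaults));
	printf("debug=4  -> 0x%x\n", netif_msg_init(4, defaults));
	return 0;
}
```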
---|
| 59 | +#define HNS3_INNER_VLAN_TAG 1 |
| 60 | +#define HNS3_OUTER_VLAN_TAG 2 |
| 61 | + |
| 62 | +#define HNS3_MIN_TX_LEN 33U |
32 | 63 | #define HNS3_MIN_TUN_PKT_LEN 65U |
33 | 64 | |
34 | 65 | /* hns3_pci_tbl - PCI Device ID Table |
.. | .. |
51 | 82 | HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, |
52 | 83 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), |
53 | 84 | HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, |
54 | | - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, |
55 | | - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), |
| 85 | + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), |
| 86 | + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, |
| 87 | + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, |
| 88 | + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), |
56 | 89 | HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, |
57 | 90 | /* required last entry */ |
58 | 91 | {0, } |
.. | .. |
63 | 96 | { |
64 | 97 | struct hns3_enet_tqp_vector *tqp_vector = vector; |
65 | 98 | |
66 | | - napi_schedule(&tqp_vector->napi); |
| 99 | + napi_schedule_irqoff(&tqp_vector->napi); |
67 | 100 | |
68 | 101 | return IRQ_HANDLED; |
69 | 102 | } |
.. | .. |
78 | 111 | |
79 | 112 | if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) |
80 | 113 | continue; |
| 114 | + |
| 115 | + /* clear the affinity mask */ |
| 116 | + irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); |
81 | 117 | |
82 | 118 | /* release the irq resource */ |
83 | 119 | free_irq(tqp_vectors->vector_irq, tqp_vectors); |
.. | .. |
101 | 137 | continue; |
102 | 138 | |
103 | 139 | if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { |
104 | | - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, |
105 | | - "%s-%s-%d", priv->netdev->name, "TxRx", |
106 | | - txrx_int_idx++); |
| 140 | + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, |
| 141 | + "%s-%s-%s-%d", hns3_driver_name, |
| 142 | + pci_name(priv->ae_handle->pdev), |
| 143 | + "TxRx", txrx_int_idx++); |
107 | 144 | txrx_int_idx++; |
108 | 145 | } else if (tqp_vectors->rx_group.ring) { |
109 | | - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, |
110 | | - "%s-%s-%d", priv->netdev->name, "Rx", |
111 | | - rx_int_idx++); |
| 146 | + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, |
| 147 | + "%s-%s-%s-%d", hns3_driver_name, |
| 148 | + pci_name(priv->ae_handle->pdev), |
| 149 | + "Rx", rx_int_idx++); |
112 | 150 | } else if (tqp_vectors->tx_group.ring) { |
113 | | - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, |
114 | | - "%s-%s-%d", priv->netdev->name, "Tx", |
115 | | - tx_int_idx++); |
| 151 | + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, |
| 152 | + "%s-%s-%s-%d", hns3_driver_name, |
| 153 | + pci_name(priv->ae_handle->pdev), |
| 154 | + "Tx", tx_int_idx++); |
116 | 155 | } else { |
117 | 156 | /* Skip this unused q_vector */ |
118 | 157 | continue; |
.. | .. |
120 | 159 | |
121 | 160 | tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; |
122 | 161 | |
| 162 | + irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN); |
123 | 163 | ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, |
124 | | - tqp_vectors->name, |
125 | | - tqp_vectors); |
| 164 | + tqp_vectors->name, tqp_vectors); |
126 | 165 | if (ret) { |
127 | 166 | netdev_err(priv->netdev, "request irq(%d) fail\n", |
128 | 167 | tqp_vectors->vector_irq); |
| 168 | + hns3_nic_uninit_irq(priv); |
129 | 169 | return ret; |
130 | 170 | } |
| 171 | + |
| 172 | + irq_set_affinity_hint(tqp_vectors->vector_irq, |
| 173 | + &tqp_vectors->affinity_mask); |
131 | 174 | |
132 | 175 | tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; |
133 | 176 | } |
.. | .. |
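Two notes on the hunk above. First, requesting the IRQ with IRQ_NOAUTOEN set keeps it disabled until hns3_vector_enable() later calls enable_irq(), so no interrupt can fire before NAPI is ready. Second, the size argument change is safe because snprintf() always NUL-terminates within the given size, as this small standalone demo shows; the buffer length and name values here are made up for illustration.

```c
#include <stdio.h>

#define INT_NAME_LEN 32	/* stand-in for HNAE3_INT_NAME_LEN */

int main(void)
{
	char name[INT_NAME_LEN];

	/* over-long input would be silently truncated, terminator kept;
	 * format mirrors the new "<driver>-<pci>-<TxRx>-<idx>" scheme.
	 */
	snprintf(name, sizeof(name), "%s-%s-%s-%d",
		 "hns3", "0000:7d:00.0", "TxRx", 3);
	printf("irq name: %s\n", name);	/* hns3-0000:7d:00.0-TxRx-3 */
	return 0;
}
```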
---|
144 | 187 | static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) |
145 | 188 | { |
146 | 189 | napi_enable(&tqp_vector->napi); |
| 190 | + enable_irq(tqp_vector->vector_irq); |
147 | 191 | |
148 | 192 | /* enable vector */ |
149 | 193 | hns3_mask_vector_irq(tqp_vector, 1); |
.. | .. |
200 | 244 | /* initialize the configuration for interrupt coalescing. |
201 | 245 | * 1. GL (Interrupt Gap Limiter) |
202 | 246 | * 2. RL (Interrupt Rate Limiter) |
| 247 | + * |
| 248 | + * Default: enable interrupt coalescing self-adaptive and GL |
203 | 249 | */ |
204 | | - |
205 | | - /* Default: enable interrupt coalescing self-adaptive and GL */ |
206 | 250 | tqp_vector->tx_group.coal.gl_adapt_enable = 1; |
207 | 251 | tqp_vector->rx_group.coal.gl_adapt_enable = 1; |
208 | 252 | |
209 | 253 | tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; |
210 | 254 | tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; |
211 | 255 | |
212 | | - tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; |
213 | 256 | tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; |
214 | 257 | tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; |
215 | 258 | } |
.. | .. |
257 | 300 | ret = netif_set_real_num_tx_queues(netdev, queue_size); |
258 | 301 | if (ret) { |
259 | 302 | netdev_err(netdev, |
260 | | - "netif_set_real_num_tx_queues fail, ret=%d!\n", |
261 | | - ret); |
| 303 | + "netif_set_real_num_tx_queues fail, ret=%d!\n", ret); |
262 | 304 | return ret; |
263 | 305 | } |
264 | 306 | |
.. | .. |
274 | 316 | |
275 | 317 | static u16 hns3_get_max_available_channels(struct hnae3_handle *h) |
276 | 318 | { |
277 | | - u16 free_tqps, max_rss_size, max_tqps; |
| 319 | + u16 alloc_tqps, max_rss_size, rss_size; |
278 | 320 | |
279 | | - h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); |
280 | | - max_tqps = h->kinfo.num_tc * max_rss_size; |
| 321 | + h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); |
| 322 | + rss_size = alloc_tqps / h->kinfo.num_tc; |
281 | 323 | |
282 | | - return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); |
| 324 | + return min_t(u16, rss_size, max_rss_size); |
| 325 | +} |
| 326 | + |
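A standalone sketch of the new channel math above, with made-up numbers. The hnae3 ops call and min_t() are kernel constructs; here they are reduced to plain C so the computation is easy to follow: the per-TC share of the allocated queues, capped by the hardware RSS limit.

```c
#include <stdio.h>

int main(void)
{
	unsigned short alloc_tqps = 16;   /* queues allocated to the function */
	unsigned short max_rss_size = 8;  /* hardware RSS table limit */
	unsigned short num_tc = 4;        /* traffic classes */

	/* new scheme: per-TC share of allocated queues, capped by RSS size */
	unsigned short rss_size = alloc_tqps / num_tc;
	unsigned short channels = rss_size < max_rss_size ? rss_size
							  : max_rss_size;

	printf("max available channels = %u\n", channels); /* 4 */
	return 0;
}
```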
---|
| 327 | +static void hns3_tqp_enable(struct hnae3_queue *tqp) |
| 328 | +{ |
| 329 | + u32 rcb_reg; |
| 330 | + |
| 331 | + rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); |
| 332 | + rcb_reg |= BIT(HNS3_RING_EN_B); |
| 333 | + hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); |
| 334 | +} |
| 335 | + |
| 336 | +static void hns3_tqp_disable(struct hnae3_queue *tqp) |
| 337 | +{ |
| 338 | + u32 rcb_reg; |
| 339 | + |
| 340 | + rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); |
| 341 | + rcb_reg &= ~BIT(HNS3_RING_EN_B); |
| 342 | + hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); |
| 343 | +} |
| 344 | + |
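A standalone model of the read-modify-write pattern used by hns3_tqp_enable()/hns3_tqp_disable() above: read the ring-enable register, flip only the enable bit, write it back so all other bits survive. The register is simulated with a plain variable and the bit position is an assumption.

```c
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1U << (n))
#define RING_EN_B	0	/* assumed position of the enable bit */

static uint32_t ring_en_reg = 0x10;	/* other bits must be preserved */

int main(void)
{
	uint32_t v;

	v = ring_en_reg;		/* models hns3_read_dev() */
	v |= BIT(RING_EN_B);		/* enable */
	ring_en_reg = v;		/* models hns3_write_dev() */
	printf("after enable:  0x%x\n", ring_en_reg);	/* 0x11 */

	v = ring_en_reg;
	v &= ~BIT(RING_EN_B);		/* disable, other bits untouched */
	ring_en_reg = v;
	printf("after disable: 0x%x\n", ring_en_reg);	/* 0x10 */
	return 0;
}
```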
---|
| 345 | +static void hns3_free_rx_cpu_rmap(struct net_device *netdev) |
| 346 | +{ |
| 347 | +#ifdef CONFIG_RFS_ACCEL |
| 348 | + free_irq_cpu_rmap(netdev->rx_cpu_rmap); |
| 349 | + netdev->rx_cpu_rmap = NULL; |
| 350 | +#endif |
| 351 | +} |
| 352 | + |
| 353 | +static int hns3_set_rx_cpu_rmap(struct net_device *netdev) |
| 354 | +{ |
| 355 | +#ifdef CONFIG_RFS_ACCEL |
| 356 | + struct hns3_nic_priv *priv = netdev_priv(netdev); |
| 357 | + struct hns3_enet_tqp_vector *tqp_vector; |
| 358 | + int i, ret; |
| 359 | + |
| 360 | + if (!netdev->rx_cpu_rmap) { |
| 361 | + netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num); |
| 362 | + if (!netdev->rx_cpu_rmap) |
| 363 | + return -ENOMEM; |
| 364 | + } |
| 365 | + |
| 366 | + for (i = 0; i < priv->vector_num; i++) { |
| 367 | + tqp_vector = &priv->tqp_vector[i]; |
| 368 | + ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, |
| 369 | + tqp_vector->vector_irq); |
| 370 | + if (ret) { |
| 371 | + hns3_free_rx_cpu_rmap(netdev); |
| 372 | + return ret; |
| 373 | + } |
| 374 | + } |
| 375 | +#endif |
| 376 | + return 0; |
283 | 377 | } |
284 | 378 | |
285 | 379 | static int hns3_nic_net_up(struct net_device *netdev) |
.. | .. |
293 | 387 | if (ret) |
294 | 388 | return ret; |
295 | 389 | |
296 | | - /* get irq resource for all vectors */ |
297 | | - ret = hns3_nic_init_irq(priv); |
298 | | - if (ret) { |
299 | | - netdev_err(netdev, "hns init irq failed! ret=%d\n", ret); |
300 | | - return ret; |
301 | | - } |
| 390 | + clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); |
302 | 391 | |
303 | 392 | /* enable the vectors */ |
304 | 393 | for (i = 0; i < priv->vector_num; i++) |
305 | 394 | hns3_vector_enable(&priv->tqp_vector[i]); |
306 | 395 | |
| 396 | + /* enable rcb */ |
| 397 | + for (j = 0; j < h->kinfo.num_tqps; j++) |
| 398 | + hns3_tqp_enable(h->kinfo.tqp[j]); |
| 399 | + |
307 | 400 | /* start the ae_dev */ |
308 | 401 | ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; |
309 | | - if (ret) |
310 | | - goto out_start_err; |
| 402 | + if (ret) { |
| 403 | + set_bit(HNS3_NIC_STATE_DOWN, &priv->state); |
| 404 | + while (j--) |
| 405 | + hns3_tqp_disable(h->kinfo.tqp[j]); |
311 | 406 | |
312 | | - clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); |
313 | | - |
314 | | - return 0; |
315 | | - |
316 | | -out_start_err: |
317 | | - for (j = i - 1; j >= 0; j--) |
318 | | - hns3_vector_disable(&priv->tqp_vector[j]); |
319 | | - |
320 | | - hns3_nic_uninit_irq(priv); |
| 407 | + for (j = i - 1; j >= 0; j--) |
| 408 | + hns3_vector_disable(&priv->tqp_vector[j]); |
| 409 | + } |
321 | 410 | |
322 | 411 | return ret; |
| 412 | +} |
| 413 | + |
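A standalone sketch of the error-unwind idiom in hns3_nic_net_up() above: resources are released in reverse order of acquisition, with `while (j--)` walking back over indices 0..j-1 and `for (j = i - 1; j >= 0; j--)` covering everything enabled before the failure point. The array here just stands in for the enabled queues.

```c
#include <stdio.h>

int main(void)
{
	int enabled[8];
	int i, j;

	for (i = 0; i < 8; i++)
		enabled[i] = 1;		/* pretend each enable succeeded */

	/* pretend the later start call failed: roll back, last first */
	j = 8;
	while (j--)
		enabled[j] = 0;

	for (j = 0; j < 8; j++)
		printf("%d", enabled[j]);	/* 00000000 */
	printf("\n");
	return 0;
}
```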
---|
| 414 | +static void hns3_config_xps(struct hns3_nic_priv *priv) |
| 415 | +{ |
| 416 | + int i; |
| 417 | + |
| 418 | + for (i = 0; i < priv->vector_num; i++) { |
| 419 | + struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; |
| 420 | + struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; |
| 421 | + |
| 422 | + while (ring) { |
| 423 | + int ret; |
| 424 | + |
| 425 | + ret = netif_set_xps_queue(priv->netdev, |
| 426 | + &tqp_vector->affinity_mask, |
| 427 | + ring->tqp->tqp_index); |
| 428 | + if (ret) |
| 429 | + netdev_warn(priv->netdev, |
| 430 | + "set xps queue failed: %d", ret); |
| 431 | + |
| 432 | + ring = ring->next; |
| 433 | + } |
| 434 | + } |
323 | 435 | } |
324 | 436 | |
325 | 437 | static int hns3_nic_net_open(struct net_device *netdev) |
.. | .. |
329 | 441 | struct hnae3_knic_private_info *kinfo; |
330 | 442 | int i, ret; |
331 | 443 | |
| 444 | + if (hns3_nic_resetting(netdev)) |
| 445 | + return -EBUSY; |
| 446 | + |
| 447 | + if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { |
| 448 | + netdev_warn(netdev, "net open repeatedly!\n"); |
| 449 | + return 0; |
| 450 | + } |
| 451 | + |
332 | 452 | netif_carrier_off(netdev); |
333 | 453 | |
334 | 454 | ret = hns3_nic_set_real_num_queue(netdev); |
.. | .. |
337 | 457 | |
338 | 458 | ret = hns3_nic_net_up(netdev); |
339 | 459 | if (ret) { |
340 | | - netdev_err(netdev, |
341 | | - "hns net up fail, ret=%d!\n", ret); |
| 460 | + netdev_err(netdev, "net up fail, ret=%d!\n", ret); |
342 | 461 | return ret; |
343 | 462 | } |
344 | 463 | |
345 | 464 | kinfo = &h->kinfo; |
346 | | - for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { |
347 | | - netdev_set_prio_tc_map(netdev, i, |
348 | | - kinfo->prio_tc[i]); |
349 | | - } |
| 465 | + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) |
| 466 | + netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]); |
350 | 467 | |
351 | | - priv->ae_handle->last_reset_time = jiffies; |
| 468 | + if (h->ae_algo->ops->set_timer_task) |
| 469 | + h->ae_algo->ops->set_timer_task(priv->ae_handle, true); |
| 470 | + |
| 471 | + hns3_config_xps(priv); |
| 472 | + |
| 473 | + netif_dbg(h, drv, netdev, "net open\n"); |
| 474 | + |
352 | 475 | return 0; |
| 476 | +} |
| 477 | + |
| 478 | +static void hns3_reset_tx_queue(struct hnae3_handle *h) |
| 479 | +{ |
| 480 | + struct net_device *ndev = h->kinfo.netdev; |
| 481 | + struct hns3_nic_priv *priv = netdev_priv(ndev); |
| 482 | + struct netdev_queue *dev_queue; |
| 483 | + u32 i; |
| 484 | + |
| 485 | + for (i = 0; i < h->kinfo.num_tqps; i++) { |
| 486 | + dev_queue = netdev_get_tx_queue(ndev, |
| 487 | + priv->ring[i].queue_index); |
| 488 | + netdev_tx_reset_queue(dev_queue); |
| 489 | + } |
353 | 490 | } |
354 | 491 | |
355 | 492 | static void hns3_nic_net_down(struct net_device *netdev) |
356 | 493 | { |
357 | 494 | struct hns3_nic_priv *priv = netdev_priv(netdev); |
| 495 | + struct hnae3_handle *h = hns3_get_handle(netdev); |
358 | 496 | const struct hnae3_ae_ops *ops; |
359 | 497 | int i; |
360 | | - |
361 | | - if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) |
362 | | - return; |
363 | 498 | |
364 | 499 | /* disable vectors */ |
365 | 500 | for (i = 0; i < priv->vector_num; i++) |
366 | 501 | hns3_vector_disable(&priv->tqp_vector[i]); |
| 502 | + |
| 503 | + /* disable rcb */ |
| 504 | + for (i = 0; i < h->kinfo.num_tqps; i++) |
| 505 | + hns3_tqp_disable(h->kinfo.tqp[i]); |
367 | 506 | |
368 | 507 | /* stop ae_dev */ |
369 | 508 | ops = priv->ae_handle->ae_algo->ops; |
370 | 509 | if (ops->stop) |
371 | 510 | ops->stop(priv->ae_handle); |
372 | 511 | |
373 | | - /* free irq resources */ |
374 | | - hns3_nic_uninit_irq(priv); |
| 512 | + /* delay ring buffer clearing to hns3_reset_notify_uninit_enet |
| 513 | + * during reset process, because driver may not be able |
| 514 | + * to disable the ring through firmware when downing the netdev. |
| 515 | + */ |
| 516 | + if (!hns3_nic_resetting(netdev)) |
| 517 | + hns3_clear_all_ring(priv->ae_handle, false); |
375 | 518 | |
376 | | - hns3_clear_all_ring(priv->ae_handle); |
| 519 | + hns3_reset_tx_queue(priv->ae_handle); |
377 | 520 | } |
378 | 521 | |
379 | 522 | static int hns3_nic_net_stop(struct net_device *netdev) |
380 | 523 | { |
381 | | - netif_tx_stop_all_queues(netdev); |
| 524 | + struct hns3_nic_priv *priv = netdev_priv(netdev); |
| 525 | + struct hnae3_handle *h = hns3_get_handle(netdev); |
| 526 | + |
| 527 | + if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) |
| 528 | + return 0; |
| 529 | + |
| 530 | + netif_dbg(h, drv, netdev, "net stop\n"); |
| 531 | + |
| 532 | + if (h->ae_algo->ops->set_timer_task) |
| 533 | + h->ae_algo->ops->set_timer_task(priv->ae_handle, false); |
| 534 | + |
382 | 535 | netif_carrier_off(netdev); |
| 536 | + netif_tx_disable(netdev); |
383 | 537 | |
384 | 538 | hns3_nic_net_down(netdev); |
385 | 539 | |
.. | .. |
401 | 555 | const unsigned char *addr) |
402 | 556 | { |
403 | 557 | struct hnae3_handle *h = hns3_get_handle(netdev); |
| 558 | + |
| 559 | + /* need ignore the request of removing device address, because |
| 560 | + * we store the device address and other addresses of uc list |
| 561 | + * in the function's mac filter list. |
| 562 | + */ |
| 563 | + if (ether_addr_equal(addr, netdev->dev_addr)) |
| 564 | + return 0; |
404 | 565 | |
405 | 566 | if (h->ae_algo->ops->rm_uc_addr) |
406 | 567 | return h->ae_algo->ops->rm_uc_addr(h, addr); |
.. | .. |
430 | 591 | return 0; |
431 | 592 | } |
432 | 593 | |
| 594 | +static u8 hns3_get_netdev_flags(struct net_device *netdev) |
| 595 | +{ |
| 596 | + u8 flags = 0; |
| 597 | + |
| 598 | + if (netdev->flags & IFF_PROMISC) { |
| 599 | + flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE; |
| 600 | + } else { |
| 601 | + flags |= HNAE3_VLAN_FLTR; |
| 602 | + if (netdev->flags & IFF_ALLMULTI) |
| 603 | + flags |= HNAE3_USER_MPE; |
| 604 | + } |
| 605 | + |
| 606 | + return flags; |
| 607 | +} |
| 608 | + |
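A standalone model of hns3_get_netdev_flags() above. IFF_PROMISC and IFF_ALLMULTI carry their standard <linux/if.h> values; the HNAE3_* bit values are made up here purely so the printout is concrete. Promiscuous mode enables all three "let everything in" bits and leaves VLAN filtering off; otherwise VLAN filtering stays on, with multicast promiscuity added for allmulti.

```c
#include <stdio.h>

#define IFF_PROMISC	0x100
#define IFF_ALLMULTI	0x200

#define HNAE3_USER_UPE	0x01	/* unicast promisc (illustrative values) */
#define HNAE3_USER_MPE	0x02	/* multicast promisc */
#define HNAE3_BPE	0x04	/* broadcast */
#define HNAE3_VLAN_FLTR	0x08	/* VLAN filter on */

static unsigned char get_netdev_flags(unsigned int netdev_flags)
{
	unsigned char flags = 0;

	if (netdev_flags & IFF_PROMISC) {
		flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
	} else {
		flags |= HNAE3_VLAN_FLTR;
		if (netdev_flags & IFF_ALLMULTI)
			flags |= HNAE3_USER_MPE;
	}
	return flags;
}

int main(void)
{
	printf("promisc:  0x%x\n", get_netdev_flags(IFF_PROMISC));  /* 0x07 */
	printf("allmulti: 0x%x\n", get_netdev_flags(IFF_ALLMULTI)); /* 0x0a */
	printf("plain:    0x%x\n", get_netdev_flags(0));            /* 0x08 */
	return 0;
}
```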
---|
433 | 609 | static void hns3_nic_set_rx_mode(struct net_device *netdev) |
434 | 610 | { |
435 | 611 | struct hnae3_handle *h = hns3_get_handle(netdev); |
| 612 | + u8 new_flags; |
436 | 613 | |
437 | | - if (h->ae_algo->ops->set_promisc_mode) { |
438 | | - if (netdev->flags & IFF_PROMISC) |
439 | | - h->ae_algo->ops->set_promisc_mode(h, true, true); |
440 | | - else if (netdev->flags & IFF_ALLMULTI) |
441 | | - h->ae_algo->ops->set_promisc_mode(h, false, true); |
442 | | - else |
443 | | - h->ae_algo->ops->set_promisc_mode(h, false, false); |
444 | | - } |
445 | | - if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) |
446 | | - netdev_err(netdev, "sync uc address fail\n"); |
447 | | - if (netdev->flags & IFF_MULTICAST) { |
448 | | - if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) |
449 | | - netdev_err(netdev, "sync mc address fail\n"); |
| 614 | + new_flags = hns3_get_netdev_flags(netdev); |
450 | 615 | |
451 | | - if (h->ae_algo->ops->update_mta_status) |
452 | | - h->ae_algo->ops->update_mta_status(h); |
| 616 | + __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); |
| 617 | + __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync); |
| 618 | + |
| 619 | + /* User mode Promisc mode enable and vlan filtering is disabled to |
| 620 | + * let all packets in. |
| 621 | + */ |
| 622 | + h->netdev_flags = new_flags; |
| 623 | + hns3_request_update_promisc_mode(h); |
| 624 | +} |
| 625 | + |
| 626 | +void hns3_request_update_promisc_mode(struct hnae3_handle *handle) |
| 627 | +{ |
| 628 | + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; |
| 629 | + |
| 630 | + if (ops->request_update_promisc_mode) |
| 631 | + ops->request_update_promisc_mode(handle); |
| 632 | +} |
| 633 | + |
| 634 | +void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) |
| 635 | +{ |
| 636 | + struct hns3_nic_priv *priv = netdev_priv(netdev); |
| 637 | + struct hnae3_handle *h = priv->ae_handle; |
| 638 | + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); |
| 639 | + bool last_state; |
| 640 | + |
| 641 | + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 && |
| 642 | + h->ae_algo->ops->enable_vlan_filter) { |
| 643 | + last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false; |
| 644 | + if (enable != last_state) { |
| 645 | + netdev_info(netdev, |
| 646 | + "%s vlan filter\n", |
| 647 | + enable ? "enable" : "disable"); |
| 648 | + h->ae_algo->ops->enable_vlan_filter(h, enable); |
| 649 | + } |
453 | 650 | } |
454 | 651 | } |
455 | 652 | |
.. | .. |
466 | 663 | return 0; |
467 | 664 | |
468 | 665 | ret = skb_cow_head(skb, 0); |
469 | | - if (ret) |
| 666 | + if (unlikely(ret < 0)) |
470 | 667 | return ret; |
471 | 668 | |
472 | 669 | l3.hdr = skb_network_header(skb); |
.. | .. |
478 | 675 | if (l3.v4->version == 4) |
479 | 676 | l3.v4->check = 0; |
480 | 677 | |
481 | | - /* tunnel packet.*/ |
| 678 | + /* tunnel packet */ |
482 | 679 | if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | |
483 | 680 | SKB_GSO_GRE_CSUM | |
484 | 681 | SKB_GSO_UDP_TUNNEL | |
.. | .. |
503 | 700 | l3.v4->check = 0; |
504 | 701 | } |
505 | 702 | |
506 | | - /* normal or tunnel packet*/ |
| 703 | + /* normal or tunnel packet */ |
507 | 704 | l4_offset = l4.hdr - skb->data; |
508 | | - hdr_len = (l4.tcp->doff * 4) + l4_offset; |
509 | 705 | |
510 | | - /* remove payload length from inner pseudo checksum when tso*/ |
| 706 | + /* remove payload length from inner pseudo checksum when tso */ |
511 | 707 | l4_paylen = skb->len - l4_offset; |
512 | | - csum_replace_by_diff(&l4.tcp->check, |
513 | | - (__force __wsum)htonl(l4_paylen)); |
| 708 | + |
| 709 | + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { |
| 710 | + hdr_len = sizeof(*l4.udp) + l4_offset; |
| 711 | + csum_replace_by_diff(&l4.udp->check, |
| 712 | + (__force __wsum)htonl(l4_paylen)); |
| 713 | + } else { |
| 714 | + hdr_len = (l4.tcp->doff << 2) + l4_offset; |
| 715 | + csum_replace_by_diff(&l4.tcp->check, |
| 716 | + (__force __wsum)htonl(l4_paylen)); |
| 717 | + } |
514 | 718 | |
515 | 719 | /* find the txbd field values */ |
516 | 720 | *paylen = skb->len - hdr_len; |
517 | | - hnae3_set_bit(*type_cs_vlan_tso, |
518 | | - HNS3_TXD_TSO_B, 1); |
| 721 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); |
519 | 722 | |
520 | 723 | /* get MSS for TSO */ |
521 | 724 | *mss = skb_shinfo(skb)->gso_size; |
| 725 | + |
| 726 | + trace_hns3_tso(skb); |
522 | 727 | |
523 | 728 | return 0; |
524 | 729 | } |
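A standalone walk-through of the header/payload split computed in hns3_set_tso() above, with made-up offsets. For TCP the L4 header length comes from doff in 32-bit words, so the new `doff << 2` is exactly the old `doff * 4`; for UDP GSO it is the fixed 8-byte UDP header. l4_paylen is what csum_replace_by_diff() removes from the pseudo-header checksum, and paylen is what goes into the TX buffer descriptor.

```c
#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 66000;	/* total skb length */
	unsigned int l4_offset = 54;	/* mac + ip header bytes */
	unsigned int doff = 8;		/* TCP data offset, in words */

	unsigned int hdr_len = (doff << 2) + l4_offset;	  /* 86 */
	unsigned int l4_paylen = skb_len - l4_offset;	  /* pseudo-hdr fix */
	unsigned int paylen = skb_len - hdr_len;	  /* txbd paylen */

	printf("hdr_len=%u l4_paylen=%u paylen=%u\n",
	       hdr_len, l4_paylen, paylen);
	return 0;
}
```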
---|
.. | .. |
526 | 731 | static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, |
527 | 732 | u8 *il4_proto) |
528 | 733 | { |
529 | | - union { |
530 | | - struct iphdr *v4; |
531 | | - struct ipv6hdr *v6; |
532 | | - unsigned char *hdr; |
533 | | - } l3; |
| 734 | + union l3_hdr_info l3; |
534 | 735 | unsigned char *l4_hdr; |
535 | 736 | unsigned char *exthdr; |
536 | 737 | u8 l4_proto_tmp; |
.. | .. |
579 | 780 | return 0; |
580 | 781 | } |
581 | 782 | |
582 | | -static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, |
583 | | - u8 il4_proto, u32 *type_cs_vlan_tso, |
584 | | - u32 *ol_type_vlan_len_msec) |
585 | | -{ |
586 | | - union { |
587 | | - struct iphdr *v4; |
588 | | - struct ipv6hdr *v6; |
589 | | - unsigned char *hdr; |
590 | | - } l3; |
591 | | - union { |
592 | | - struct tcphdr *tcp; |
593 | | - struct udphdr *udp; |
594 | | - struct gre_base_hdr *gre; |
595 | | - unsigned char *hdr; |
596 | | - } l4; |
597 | | - unsigned char *l2_hdr; |
598 | | - u8 l4_proto = ol4_proto; |
599 | | - u32 ol2_len; |
600 | | - u32 ol3_len; |
601 | | - u32 ol4_len; |
602 | | - u32 l2_len; |
603 | | - u32 l3_len; |
604 | | - |
605 | | - l3.hdr = skb_network_header(skb); |
606 | | - l4.hdr = skb_transport_header(skb); |
607 | | - |
608 | | - /* compute L2 header size for normal packet, defined in 2 Bytes */ |
609 | | - l2_len = l3.hdr - skb->data; |
610 | | - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, |
611 | | - HNS3_TXD_L2LEN_S, l2_len >> 1); |
612 | | - |
613 | | - /* tunnel packet*/ |
614 | | - if (skb->encapsulation) { |
615 | | - /* compute OL2 header size, defined in 2 Bytes */ |
616 | | - ol2_len = l2_len; |
617 | | - hnae3_set_field(*ol_type_vlan_len_msec, |
618 | | - HNS3_TXD_L2LEN_M, |
619 | | - HNS3_TXD_L2LEN_S, ol2_len >> 1); |
620 | | - |
621 | | - /* compute OL3 header size, defined in 4 Bytes */ |
622 | | - ol3_len = l4.hdr - l3.hdr; |
623 | | - hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, |
624 | | - HNS3_TXD_L3LEN_S, ol3_len >> 2); |
625 | | - |
626 | | - /* MAC in UDP, MAC in GRE (0x6558)*/ |
627 | | - if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { |
628 | | - /* switch MAC header ptr from outer to inner header.*/ |
629 | | - l2_hdr = skb_inner_mac_header(skb); |
630 | | - |
631 | | - /* compute OL4 header size, defined in 4 Bytes. */ |
632 | | - ol4_len = l2_hdr - l4.hdr; |
633 | | - hnae3_set_field(*ol_type_vlan_len_msec, |
634 | | - HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, |
635 | | - ol4_len >> 2); |
636 | | - |
637 | | - /* switch IP header ptr from outer to inner header */ |
638 | | - l3.hdr = skb_inner_network_header(skb); |
639 | | - |
640 | | - /* compute inner l2 header size, defined in 2 Bytes. */ |
641 | | - l2_len = l3.hdr - l2_hdr; |
642 | | - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, |
643 | | - HNS3_TXD_L2LEN_S, l2_len >> 1); |
644 | | - } else { |
645 | | - /* skb packet types not supported by hardware, |
646 | | - * txbd len fild doesn't be filled. |
647 | | - */ |
648 | | - return; |
649 | | - } |
650 | | - |
651 | | - /* switch L4 header pointer from outer to inner */ |
652 | | - l4.hdr = skb_inner_transport_header(skb); |
653 | | - |
654 | | - l4_proto = il4_proto; |
655 | | - } |
656 | | - |
657 | | - /* compute inner(/normal) L3 header size, defined in 4 Bytes */ |
658 | | - l3_len = l4.hdr - l3.hdr; |
659 | | - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, |
660 | | - HNS3_TXD_L3LEN_S, l3_len >> 2); |
661 | | - |
662 | | - /* compute inner(/normal) L4 header size, defined in 4 Bytes */ |
663 | | - switch (l4_proto) { |
664 | | - case IPPROTO_TCP: |
665 | | - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, |
666 | | - HNS3_TXD_L4LEN_S, l4.tcp->doff); |
667 | | - break; |
668 | | - case IPPROTO_SCTP: |
669 | | - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, |
670 | | - HNS3_TXD_L4LEN_S, |
671 | | - (sizeof(struct sctphdr) >> 2)); |
672 | | - break; |
673 | | - case IPPROTO_UDP: |
674 | | - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, |
675 | | - HNS3_TXD_L4LEN_S, |
676 | | - (sizeof(struct udphdr) >> 2)); |
677 | | - break; |
678 | | - default: |
679 | | - /* skb packet types not supported by hardware, |
680 | | - * txbd len fild doesn't be filled. |
681 | | - */ |
682 | | - return; |
683 | | - } |
684 | | -} |
685 | | - |
686 | 783 | /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL |
687 | 784 | * and it is udp packet, which has a dest port as the IANA assigned. |
688 | 785 | * the hardware is expected to do the checksum offload, but the |
689 | 786 | * hardware will not do the checksum offload when udp dest port is |
690 | | - * 4789. |
| 787 | + * 4789, 4790 or 6081. |
691 | 788 | */ |
692 | 789 | static bool hns3_tunnel_csum_bug(struct sk_buff *skb) |
693 | 790 | { |
694 | | -#define IANA_VXLAN_PORT 4789 |
695 | | - union { |
696 | | - struct tcphdr *tcp; |
697 | | - struct udphdr *udp; |
698 | | - struct gre_base_hdr *gre; |
699 | | - unsigned char *hdr; |
700 | | - } l4; |
| 791 | + union l4_hdr_info l4; |
701 | 792 | |
702 | 793 | l4.hdr = skb_transport_header(skb); |
703 | 794 | |
704 | | - if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT))) |
| 795 | + if (!(!skb->encapsulation && |
| 796 | + (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || |
| 797 | + l4.udp->dest == htons(GENEVE_UDP_PORT) || |
| 798 | + l4.udp->dest == htons(4790)))) |
705 | 799 | return false; |
706 | 800 | |
707 | 801 | return true; |
708 | 802 | } |
709 | 803 | |
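A standalone check mirroring hns3_tunnel_csum_bug() above: a non-encapsulated UDP packet whose destination port is VXLAN (4789), VXLAN-GPE (4790) or GENEVE (6081) must fall back to software checksumming because the hardware skips the offload for those ports. In the real skb the port is already in network byte order, hence the htons() comparisons.

```c
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IANA_VXLAN_UDP_PORT	4789
#define GENEVE_UDP_PORT		6081

static bool tunnel_csum_bug(bool encapsulation, uint16_t udp_dest_be)
{
	if (encapsulation)
		return false;	/* offloading the outer header is fine */

	return udp_dest_be == htons(IANA_VXLAN_UDP_PORT) ||
	       udp_dest_be == htons(GENEVE_UDP_PORT) ||
	       udp_dest_be == htons(4790);
}

int main(void)
{
	printf("%d\n", tunnel_csum_bug(false, htons(4789))); /* 1: bug path */
	printf("%d\n", tunnel_csum_bug(false, htons(53)));   /* 0: offload */
	return 0;
}
```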
---|
710 | | -static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, |
711 | | - u8 il4_proto, u32 *type_cs_vlan_tso, |
712 | | - u32 *ol_type_vlan_len_msec) |
| 804 | +static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, |
| 805 | + u32 *ol_type_vlan_len_msec) |
713 | 806 | { |
714 | | - union { |
715 | | - struct iphdr *v4; |
716 | | - struct ipv6hdr *v6; |
717 | | - unsigned char *hdr; |
718 | | - } l3; |
719 | | - u32 l4_proto = ol4_proto; |
| 807 | + u32 l2_len, l3_len, l4_len; |
| 808 | + unsigned char *il2_hdr; |
| 809 | + union l3_hdr_info l3; |
| 810 | + union l4_hdr_info l4; |
720 | 811 | |
721 | 812 | l3.hdr = skb_network_header(skb); |
| 813 | + l4.hdr = skb_transport_header(skb); |
722 | 814 | |
723 | | - /* define OL3 type and tunnel type(OL4).*/ |
| 815 | + /* compute OL2 header size, defined in 2 Bytes */ |
| 816 | + l2_len = l3.hdr - skb->data; |
| 817 | + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1); |
| 818 | + |
| 819 | + /* compute OL3 header size, defined in 4 Bytes */ |
| 820 | + l3_len = l4.hdr - l3.hdr; |
| 821 | + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2); |
| 822 | + |
| 823 | + il2_hdr = skb_inner_mac_header(skb); |
| 824 | + /* compute OL4 header size, defined in 4 Bytes */ |
| 825 | + l4_len = il2_hdr - l4.hdr; |
| 826 | + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2); |
| 827 | + |
| 828 | + /* define outer network header type */ |
| 829 | + if (skb->protocol == htons(ETH_P_IP)) { |
| 830 | + if (skb_is_gso(skb)) |
| 831 | + hns3_set_field(*ol_type_vlan_len_msec, |
| 832 | + HNS3_TXD_OL3T_S, |
| 833 | + HNS3_OL3T_IPV4_CSUM); |
| 834 | + else |
| 835 | + hns3_set_field(*ol_type_vlan_len_msec, |
| 836 | + HNS3_TXD_OL3T_S, |
| 837 | + HNS3_OL3T_IPV4_NO_CSUM); |
| 838 | + |
| 839 | + } else if (skb->protocol == htons(ETH_P_IPV6)) { |
| 840 | + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, |
| 841 | + HNS3_OL3T_IPV6); |
| 842 | + } |
| 843 | + |
| 844 | + if (ol4_proto == IPPROTO_UDP) |
| 845 | + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, |
| 846 | + HNS3_TUN_MAC_IN_UDP); |
| 847 | + else if (ol4_proto == IPPROTO_GRE) |
| 848 | + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, |
| 849 | + HNS3_TUN_NVGRE); |
| 850 | +} |
| 851 | + |
| 852 | +static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, |
| 853 | + u8 il4_proto, u32 *type_cs_vlan_tso, |
| 854 | + u32 *ol_type_vlan_len_msec) |
| 855 | +{ |
| 856 | + unsigned char *l2_hdr = skb->data; |
| 857 | + u32 l4_proto = ol4_proto; |
| 858 | + union l4_hdr_info l4; |
| 859 | + union l3_hdr_info l3; |
| 860 | + u32 l2_len, l3_len; |
| 861 | + |
| 862 | + l4.hdr = skb_transport_header(skb); |
| 863 | + l3.hdr = skb_network_header(skb); |
| 864 | + |
| 865 | + /* handle encapsulation skb */ |
724 | 866 | if (skb->encapsulation) { |
725 | | - /* define outer network header type.*/ |
726 | | - if (skb->protocol == htons(ETH_P_IP)) { |
727 | | - if (skb_is_gso(skb)) |
728 | | - hnae3_set_field(*ol_type_vlan_len_msec, |
729 | | - HNS3_TXD_OL3T_M, |
730 | | - HNS3_TXD_OL3T_S, |
731 | | - HNS3_OL3T_IPV4_CSUM); |
732 | | - else |
733 | | - hnae3_set_field(*ol_type_vlan_len_msec, |
734 | | - HNS3_TXD_OL3T_M, |
735 | | - HNS3_TXD_OL3T_S, |
736 | | - HNS3_OL3T_IPV4_NO_CSUM); |
737 | | - |
738 | | - } else if (skb->protocol == htons(ETH_P_IPV6)) { |
739 | | - hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, |
740 | | - HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); |
741 | | - } |
742 | | - |
743 | | - /* define tunnel type(OL4).*/ |
744 | | - switch (l4_proto) { |
745 | | - case IPPROTO_UDP: |
746 | | - hnae3_set_field(*ol_type_vlan_len_msec, |
747 | | - HNS3_TXD_TUNTYPE_M, |
748 | | - HNS3_TXD_TUNTYPE_S, |
749 | | - HNS3_TUN_MAC_IN_UDP); |
750 | | - break; |
751 | | - case IPPROTO_GRE: |
752 | | - hnae3_set_field(*ol_type_vlan_len_msec, |
753 | | - HNS3_TXD_TUNTYPE_M, |
754 | | - HNS3_TXD_TUNTYPE_S, |
755 | | - HNS3_TUN_NVGRE); |
756 | | - break; |
757 | | - default: |
| 867 | + /* If this is a not UDP/GRE encapsulation skb */ |
| 868 | + if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { |
758 | 869 | /* drop the skb tunnel packet if hardware don't support, |
759 | 870 | * because hardware can't calculate csum when TSO. |
760 | 871 | */ |
.. | .. |
767 | 878 | return skb_checksum_help(skb); |
768 | 879 | } |
769 | 880 | |
| 881 | + hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); |
| 882 | + |
| 883 | + /* switch to inner header */ |
| 884 | + l2_hdr = skb_inner_mac_header(skb); |
770 | 885 | l3.hdr = skb_inner_network_header(skb); |
| 886 | + l4.hdr = skb_inner_transport_header(skb); |
771 | 887 | l4_proto = il4_proto; |
772 | 888 | } |
773 | 889 | |
774 | 890 | if (l3.v4->version == 4) { |
775 | | - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, |
776 | | - HNS3_TXD_L3T_S, HNS3_L3T_IPV4); |
| 891 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, |
| 892 | + HNS3_L3T_IPV4); |
777 | 893 | |
778 | 894 | /* the stack computes the IP header already, the only time we |
779 | 895 | * need the hardware to recompute it is in the case of TSO. |
780 | 896 | */ |
781 | 897 | if (skb_is_gso(skb)) |
782 | | - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); |
| 898 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); |
783 | 899 | } else if (l3.v6->version == 6) { |
784 | | - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, |
785 | | - HNS3_TXD_L3T_S, HNS3_L3T_IPV6); |
| 900 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, |
| 901 | + HNS3_L3T_IPV6); |
786 | 902 | } |
787 | 903 | |
| 904 | + /* compute inner(/normal) L2 header size, defined in 2 Bytes */ |
| 905 | + l2_len = l3.hdr - l2_hdr; |
| 906 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); |
| 907 | + |
| 908 | + /* compute inner(/normal) L3 header size, defined in 4 Bytes */ |
| 909 | + l3_len = l4.hdr - l3.hdr; |
| 910 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); |
| 911 | + |
| 912 | + /* compute inner(/normal) L4 header size, defined in 4 Bytes */ |
788 | 913 | switch (l4_proto) { |
789 | 914 | case IPPROTO_TCP: |
790 | | - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); |
791 | | - hnae3_set_field(*type_cs_vlan_tso, |
792 | | - HNS3_TXD_L4T_M, |
793 | | - HNS3_TXD_L4T_S, |
794 | | - HNS3_L4T_TCP); |
| 915 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); |
| 916 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, |
| 917 | + HNS3_L4T_TCP); |
| 918 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, |
| 919 | + l4.tcp->doff); |
795 | 920 | break; |
796 | 921 | case IPPROTO_UDP: |
797 | 922 | if (hns3_tunnel_csum_bug(skb)) { |
.. | .. |
800 | 925 | return ret ? ret : skb_checksum_help(skb); |
801 | 926 | } |
802 | 927 | |
803 | | - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); |
804 | | - hnae3_set_field(*type_cs_vlan_tso, |
805 | | - HNS3_TXD_L4T_M, |
806 | | - HNS3_TXD_L4T_S, |
807 | | - HNS3_L4T_UDP); |
| 928 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); |
| 929 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, |
| 930 | + HNS3_L4T_UDP); |
| 931 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, |
| 932 | + (sizeof(struct udphdr) >> 2)); |
808 | 933 | break; |
809 | 934 | case IPPROTO_SCTP: |
810 | | - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); |
811 | | - hnae3_set_field(*type_cs_vlan_tso, |
812 | | - HNS3_TXD_L4T_M, |
813 | | - HNS3_TXD_L4T_S, |
814 | | - HNS3_L4T_SCTP); |
| 935 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); |
| 936 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, |
| 937 | + HNS3_L4T_SCTP); |
| 938 | + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, |
| 939 | + (sizeof(struct sctphdr) >> 2)); |
815 | 940 | break; |
816 | 941 | default: |
817 | 942 | /* drop the skb tunnel packet if hardware don't support, |
.. | .. |
829 | 954 | return 0; |
830 | 955 | } |
831 | 956 | |
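A standalone reminder of the unit conventions behind the L*LEN fields programmed above: L2 length is expressed in 2-byte units and L3/L4 lengths in 4-byte units, which is why the driver shifts by 1 or 2 before writing the descriptor. The example header sizes assume an untagged Ethernet frame and an option-less IPv4 header.

```c
#include <netinet/udp.h>
#include <stdio.h>

int main(void)
{
	unsigned int eth_hdr_len = 14;	/* no VLAN tag */
	unsigned int ipv4_hdr_len = 20;	/* no options */

	printf("L2LEN field = %u\n", eth_hdr_len >> 1);		   /* 7 */
	printf("L3LEN field = %u\n", ipv4_hdr_len >> 2);	   /* 5 */
	printf("UDP L4LEN  = %zu\n", sizeof(struct udphdr) >> 2); /* 2 */
	return 0;
}
```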
---|
832 | | -static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) |
| 957 | +static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, |
| 958 | + struct sk_buff *skb) |
833 | 959 | { |
834 | | - /* Config bd buffer end */ |
835 | | - hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, |
836 | | - HNS3_TXD_BDTYPE_S, 0); |
837 | | - hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); |
838 | | - hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); |
839 | | - hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); |
840 | | -} |
| 960 | + struct hnae3_handle *handle = tx_ring->tqp->handle; |
| 961 | + struct vlan_ethhdr *vhdr; |
| 962 | + int rc; |
841 | 963 | |
842 | | -static int hns3_fill_desc_vtags(struct sk_buff *skb, |
843 | | - struct hns3_enet_ring *tx_ring, |
844 | | - u32 *inner_vlan_flag, |
845 | | - u32 *out_vlan_flag, |
846 | | - u16 *inner_vtag, |
847 | | - u16 *out_vtag) |
848 | | -{ |
849 | | -#define HNS3_TX_VLAN_PRIO_SHIFT 13 |
| 964 | + if (!(skb->protocol == htons(ETH_P_8021Q) || |
| 965 | + skb_vlan_tag_present(skb))) |
| 966 | + return 0; |
| 967 | + |
| 968 | + /* Since HW limitation, if port based insert VLAN enabled, only one VLAN |
| 969 | + * header is allowed in skb, otherwise it will cause RAS error. |
| 970 | + */ |
| 971 | + if (unlikely(skb_vlan_tagged_multi(skb) && |
| 972 | + handle->port_base_vlan_state == |
| 973 | + HNAE3_PORT_BASE_VLAN_ENABLE)) |
| 974 | + return -EINVAL; |
850 | 975 | |
851 | 976 | if (skb->protocol == htons(ETH_P_8021Q) && |
852 | | - !(tx_ring->tqp->handle->kinfo.netdev->features & |
853 | | - NETIF_F_HW_VLAN_CTAG_TX)) { |
| 977 | + !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { |
854 | 978 | /* When HW VLAN acceleration is turned off, and the stack |
855 | 979 | * sets the protocol to 802.1q, the driver just need to |
856 | 980 | * set the protocol to the encapsulated ethertype. |
.. | .. |
860 | 984 | } |
861 | 985 | |
862 | 986 | if (skb_vlan_tag_present(skb)) { |
863 | | - u16 vlan_tag; |
864 | | - |
865 | | - vlan_tag = skb_vlan_tag_get(skb); |
866 | | - vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT; |
867 | | - |
868 | 987 | /* Based on hw strategy, use out_vtag in two layer tag case, |
869 | 988 | * and use inner_vtag in one tag case. |
870 | 989 | */ |
871 | | - if (skb->protocol == htons(ETH_P_8021Q)) { |
872 | | - hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); |
873 | | - *out_vtag = vlan_tag; |
874 | | - } else { |
875 | | - hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); |
876 | | - *inner_vtag = vlan_tag; |
877 | | - } |
878 | | - } else if (skb->protocol == htons(ETH_P_8021Q)) { |
879 | | - struct vlan_ethhdr *vhdr; |
880 | | - int rc; |
| 990 | + if (skb->protocol == htons(ETH_P_8021Q) && |
| 991 | + handle->port_base_vlan_state == |
| 992 | + HNAE3_PORT_BASE_VLAN_DISABLE) |
| 993 | + rc = HNS3_OUTER_VLAN_TAG; |
| 994 | + else |
| 995 | + rc = HNS3_INNER_VLAN_TAG; |
881 | 996 | |
882 | | - rc = skb_cow_head(skb, 0); |
883 | | - if (rc < 0) |
884 | | - return rc; |
885 | | - vhdr = (struct vlan_ethhdr *)skb->data; |
886 | | - vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7) |
887 | | - << HNS3_TX_VLAN_PRIO_SHIFT); |
| 997 | + skb->protocol = vlan_get_protocol(skb); |
| 998 | + return rc; |
888 | 999 | } |
| 1000 | + |
| 1001 | + rc = skb_cow_head(skb, 0); |
| 1002 | + if (unlikely(rc < 0)) |
| 1003 | + return rc; |
| 1004 | + |
| 1005 | + vhdr = (struct vlan_ethhdr *)skb->data; |
| 1006 | + vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) |
| 1007 | + & VLAN_PRIO_MASK); |
889 | 1008 | |
890 | 1009 | skb->protocol = vlan_get_protocol(skb); |
891 | 1010 | return 0; |
892 | 1011 | } |
893 | 1012 | |
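A standalone demo of the priority insertion done above when the stack hands the driver an 802.1Q frame without an offloaded tag: the 3 PCP bits are OR-ed into the TCI while VID and DEI stay untouched. VLAN_PRIO_SHIFT (13) and VLAN_PRIO_MASK (0xe000) are the standard values from <linux/if_vlan.h>, redefined here so the snippet builds in userspace.

```c
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT	13
#define VLAN_PRIO_MASK	0xe000

int main(void)
{
	uint16_t tci = htons(100);	/* VLAN ID 100, priority 0 (BE) */
	unsigned int priority = 5;	/* skb->priority */

	/* OR the PCP bits into the TCI, keeping VID/DEI untouched */
	tci |= htons((priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);

	printf("TCI = 0x%04x\n", ntohs(tci));	/* 0xa064 */
	return 0;
}
```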
---|
894 | | -static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, |
895 | | - int size, dma_addr_t dma, int frag_end, |
896 | | - enum hns_desc_type type) |
| 1013 | +static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, |
| 1014 | + struct sk_buff *skb, struct hns3_desc *desc) |
897 | 1015 | { |
898 | | - struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; |
899 | | - struct hns3_desc *desc = &ring->desc[ring->next_to_use]; |
900 | 1016 | u32 ol_type_vlan_len_msec = 0; |
901 | | - u16 bdtp_fe_sc_vld_ra_ri = 0; |
902 | 1017 | u32 type_cs_vlan_tso = 0; |
903 | | - struct sk_buff *skb; |
| 1018 | + u32 paylen = skb->len; |
904 | 1019 | u16 inner_vtag = 0; |
905 | 1020 | u16 out_vtag = 0; |
906 | | - u32 paylen = 0; |
907 | 1021 | u16 mss = 0; |
908 | | - u8 ol4_proto; |
909 | | - u8 il4_proto; |
910 | 1022 | int ret; |
911 | 1023 | |
912 | | - /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ |
| 1024 | + ret = hns3_handle_vtags(ring, skb); |
| 1025 | + if (unlikely(ret < 0)) { |
| 1026 | + u64_stats_update_begin(&ring->syncp); |
| 1027 | + ring->stats.tx_vlan_err++; |
| 1028 | + u64_stats_update_end(&ring->syncp); |
| 1029 | + return ret; |
| 1030 | + } else if (ret == HNS3_INNER_VLAN_TAG) { |
| 1031 | + inner_vtag = skb_vlan_tag_get(skb); |
| 1032 | + inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & |
| 1033 | + VLAN_PRIO_MASK; |
| 1034 | + hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); |
| 1035 | + } else if (ret == HNS3_OUTER_VLAN_TAG) { |
| 1036 | + out_vtag = skb_vlan_tag_get(skb); |
| 1037 | + out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & |
| 1038 | + VLAN_PRIO_MASK; |
| 1039 | + hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, |
| 1040 | + 1); |
| 1041 | + } |
| 1042 | + |
| 1043 | + if (skb->ip_summed == CHECKSUM_PARTIAL) { |
| 1044 | + u8 ol4_proto, il4_proto; |
| 1045 | + |
| 1046 | + skb_reset_mac_len(skb); |
| 1047 | + |
| 1048 | + ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); |
| 1049 | + if (unlikely(ret < 0)) { |
| 1050 | + u64_stats_update_begin(&ring->syncp); |
| 1051 | + ring->stats.tx_l4_proto_err++; |
| 1052 | + u64_stats_update_end(&ring->syncp); |
| 1053 | + return ret; |
| 1054 | + } |
| 1055 | + |
| 1056 | + ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, |
| 1057 | + &type_cs_vlan_tso, |
| 1058 | + &ol_type_vlan_len_msec); |
| 1059 | + if (unlikely(ret < 0)) { |
| 1060 | + u64_stats_update_begin(&ring->syncp); |
| 1061 | + ring->stats.tx_l2l3l4_err++; |
| 1062 | + u64_stats_update_end(&ring->syncp); |
| 1063 | + return ret; |
| 1064 | + } |
| 1065 | + |
| 1066 | + ret = hns3_set_tso(skb, &paylen, &mss, |
| 1067 | + &type_cs_vlan_tso); |
| 1068 | + if (unlikely(ret < 0)) { |
| 1069 | + u64_stats_update_begin(&ring->syncp); |
| 1070 | + ring->stats.tx_tso_err++; |
| 1071 | + u64_stats_update_end(&ring->syncp); |
| 1072 | + return ret; |
| 1073 | + } |
| 1074 | + } |
| 1075 | + |
| 1076 | + /* Set txbd */ |
| 1077 | + desc->tx.ol_type_vlan_len_msec = |
| 1078 | + cpu_to_le32(ol_type_vlan_len_msec); |
| 1079 | + desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); |
| 1080 | + desc->tx.paylen = cpu_to_le32(paylen); |
| 1081 | + desc->tx.mss = cpu_to_le16(mss); |
| 1082 | + desc->tx.vlan_tag = cpu_to_le16(inner_vtag); |
| 1083 | + desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); |
| 1084 | + |
| 1085 | + return 0; |
| 1086 | +} |
| 1087 | + |
| 1088 | +static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, |
| 1089 | + unsigned int size, enum hns_desc_type type) |
| 1090 | +{ |
| 1091 | +#define HNS3_LIKELY_BD_NUM 1 |
| 1092 | + |
| 1093 | + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; |
| 1094 | + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; |
| 1095 | + struct device *dev = ring_to_dev(ring); |
| 1096 | + skb_frag_t *frag; |
| 1097 | + unsigned int frag_buf_num; |
| 1098 | + int k, sizeoflast; |
| 1099 | + dma_addr_t dma; |
| 1100 | + |
| 1101 | + if (type == DESC_TYPE_FRAGLIST_SKB || |
| 1102 | + type == DESC_TYPE_SKB) { |
| 1103 | + struct sk_buff *skb = (struct sk_buff *)priv; |
| 1104 | + |
| 1105 | + dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); |
| 1106 | + } else { |
| 1107 | + frag = (skb_frag_t *)priv; |
| 1108 | + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); |
| 1109 | + } |
| 1110 | + |
| 1111 | + if (unlikely(dma_mapping_error(dev, dma))) { |
| 1112 | + u64_stats_update_begin(&ring->syncp); |
| 1113 | + ring->stats.sw_err_cnt++; |
| 1114 | + u64_stats_update_end(&ring->syncp); |
| 1115 | + return -ENOMEM; |
| 1116 | + } |
| 1117 | + |
913 | 1118 | desc_cb->priv = priv; |
914 | 1119 | desc_cb->length = size; |
915 | 1120 | desc_cb->dma = dma; |
916 | 1121 | desc_cb->type = type; |
917 | 1122 | |
918 | | - /* now, fill the descriptor */ |
919 | | - desc->addr = cpu_to_le64(dma); |
920 | | - desc->tx.send_size = cpu_to_le16((u16)size); |
921 | | - hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); |
922 | | - desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); |
| 1123 | + if (likely(size <= HNS3_MAX_BD_SIZE)) { |
| 1124 | + desc->addr = cpu_to_le64(dma); |
| 1125 | + desc->tx.send_size = cpu_to_le16(size); |
| 1126 | + desc->tx.bdtp_fe_sc_vld_ra_ri = |
| 1127 | + cpu_to_le16(BIT(HNS3_TXD_VLD_B)); |
923 | 1128 | |
924 | | - if (type == DESC_TYPE_SKB) { |
925 | | - skb = (struct sk_buff *)priv; |
926 | | - paylen = skb->len; |
927 | | - |
928 | | - ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso, |
929 | | - &ol_type_vlan_len_msec, |
930 | | - &inner_vtag, &out_vtag); |
931 | | - if (unlikely(ret)) |
932 | | - return ret; |
933 | | - |
934 | | - if (skb->ip_summed == CHECKSUM_PARTIAL) { |
935 | | - skb_reset_mac_len(skb); |
936 | | - |
937 | | - ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); |
938 | | - if (ret) |
939 | | - return ret; |
940 | | - hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, |
941 | | - &type_cs_vlan_tso, |
942 | | - &ol_type_vlan_len_msec); |
943 | | - ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, |
944 | | - &type_cs_vlan_tso, |
945 | | - &ol_type_vlan_len_msec); |
946 | | - if (ret) |
947 | | - return ret; |
948 | | - |
949 | | - ret = hns3_set_tso(skb, &paylen, &mss, |
950 | | - &type_cs_vlan_tso); |
951 | | - if (ret) |
952 | | - return ret; |
953 | | - } |
954 | | - |
955 | | - /* Set txbd */ |
956 | | - desc->tx.ol_type_vlan_len_msec = |
957 | | - cpu_to_le32(ol_type_vlan_len_msec); |
958 | | - desc->tx.type_cs_vlan_tso_len = |
959 | | - cpu_to_le32(type_cs_vlan_tso); |
960 | | - desc->tx.paylen = cpu_to_le32(paylen); |
961 | | - desc->tx.mss = cpu_to_le16(mss); |
962 | | - desc->tx.vlan_tag = cpu_to_le16(inner_vtag); |
963 | | - desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); |
| 1129 | + trace_hns3_tx_desc(ring, ring->next_to_use); |
| 1130 | + ring_ptr_move_fw(ring, next_to_use); |
| 1131 | + return HNS3_LIKELY_BD_NUM; |
964 | 1132 | } |
965 | 1133 | |
966 | | - /* move ring pointer to next.*/ |
967 | | - ring_ptr_move_fw(ring, next_to_use); |
968 | | - |
969 | | - return 0; |
970 | | -} |
971 | | - |
972 | | -static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, |
973 | | - int size, dma_addr_t dma, int frag_end, |
974 | | - enum hns_desc_type type) |
975 | | -{ |
976 | | - unsigned int frag_buf_num; |
977 | | - unsigned int k; |
978 | | - int sizeoflast; |
979 | | - int ret; |
980 | | - |
981 | | - frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; |
| 1134 | + frag_buf_num = hns3_tx_bd_count(size); |
982 | 1135 | sizeoflast = size % HNS3_MAX_BD_SIZE; |
983 | 1136 | sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; |
984 | 1137 | |
985 | | - /* When the frag size is bigger than hardware, split this frag */ |
| 1138 | + /* When frag size is bigger than hardware limit, split this frag */ |
986 | 1139 | for (k = 0; k < frag_buf_num; k++) { |
987 | | - ret = hns3_fill_desc(ring, priv, |
988 | | - (k == frag_buf_num - 1) ? |
989 | | - sizeoflast : HNS3_MAX_BD_SIZE, |
990 | | - dma + HNS3_MAX_BD_SIZE * k, |
991 | | - frag_end && (k == frag_buf_num - 1) ? 1 : 0, |
992 | | - (type == DESC_TYPE_SKB && !k) ? |
993 | | - DESC_TYPE_SKB : DESC_TYPE_PAGE); |
994 | | - if (ret) |
995 | | - return ret; |
| 1140 | + /* now, fill the descriptor */ |
| 1141 | + desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); |
| 1142 | + desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? |
| 1143 | + (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); |
| 1144 | + desc->tx.bdtp_fe_sc_vld_ra_ri = |
| 1145 | + cpu_to_le16(BIT(HNS3_TXD_VLD_B)); |
| 1146 | + |
| 1147 | + trace_hns3_tx_desc(ring, ring->next_to_use); |
| 1148 | + /* move ring pointer to next */ |
| 1149 | + ring_ptr_move_fw(ring, next_to_use); |
| 1150 | + |
| 1151 | + desc = &ring->desc[ring->next_to_use]; |
996 | 1152 | } |
997 | 1153 | |
998 | | - return 0; |
| 1154 | + return frag_buf_num; |
999 | 1155 | } |
1000 | 1156 | |
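A standalone model of the splitting loop in hns3_fill_desc() above: a buffer larger than the per-BD limit is carved into full-size BDs plus a final "sizeoflast" remainder. The 65535-byte limit is an assumption for illustration; the offset/length math mirrors the loop in the hunk.

```c
#include <stdio.h>

#define HNS3_MAX_BD_SIZE 65535U

int main(void)
{
	unsigned int size = 150000;	/* bytes to send from one buffer */
	unsigned int frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) /
				    HNS3_MAX_BD_SIZE;
	unsigned int sizeoflast = size % HNS3_MAX_BD_SIZE;
	unsigned int k;

	if (!sizeoflast)
		sizeoflast = HNS3_MAX_BD_SIZE;	/* exact multiple */

	for (k = 0; k < frag_buf_num; k++)	/* 2 full BDs + remainder */
		printf("BD %u: offset=%u len=%u\n", k, HNS3_MAX_BD_SIZE * k,
		       (k == frag_buf_num - 1) ? sizeoflast
					       : HNS3_MAX_BD_SIZE);
	return 0;
}
```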
---|
1001 | | -static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, |
---|
1002 | | - struct hns3_enet_ring *ring) |
---|
| 1157 | +static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, |
---|
| 1158 | + unsigned int bd_num) |
---|
1003 | 1159 | { |
---|
1004 | | - struct sk_buff *skb = *out_skb; |
---|
1005 | | - struct skb_frag_struct *frag; |
---|
1006 | | - int bdnum_for_frag; |
---|
1007 | | - int frag_num; |
---|
1008 | | - int buf_num; |
---|
1009 | | - int size; |
---|
| 1160 | + unsigned int size; |
---|
1010 | 1161 | int i; |
---|
1011 | 1162 | |
---|
1012 | 1163 | size = skb_headlen(skb); |
---|
1013 | | - buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; |
---|
| 1164 | + while (size > HNS3_MAX_BD_SIZE) { |
---|
| 1165 | + bd_size[bd_num++] = HNS3_MAX_BD_SIZE; |
---|
| 1166 | + size -= HNS3_MAX_BD_SIZE; |
---|
1014 | 1167 | |
---|
1015 | | - frag_num = skb_shinfo(skb)->nr_frags; |
---|
1016 | | - for (i = 0; i < frag_num; i++) { |
---|
1017 | | - frag = &skb_shinfo(skb)->frags[i]; |
---|
1018 | | - size = skb_frag_size(frag); |
---|
1019 | | - bdnum_for_frag = |
---|
1020 | | - (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; |
---|
1021 | | - if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) |
---|
1022 | | - return -ENOMEM; |
---|
1023 | | - |
---|
1024 | | - buf_num += bdnum_for_frag; |
---|
| 1168 | + if (bd_num > HNS3_MAX_TSO_BD_NUM) |
---|
| 1169 | + return bd_num; |
---|
1025 | 1170 | } |
---|
1026 | 1171 | |
---|
1027 | | - if (buf_num > ring_space(ring)) |
---|
1028 | | - return -EBUSY; |
---|
| 1172 | + if (size) { |
---|
| 1173 | + bd_size[bd_num++] = size; |
---|
| 1174 | + if (bd_num > HNS3_MAX_TSO_BD_NUM) |
---|
| 1175 | + return bd_num; |
---|
| 1176 | + } |
---|
1029 | 1177 | |
---|
1030 | | - *bnum = buf_num; |
---|
1031 | | - return 0; |
---|
| 1178 | + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
---|
| 1179 | + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
---|
| 1180 | + size = skb_frag_size(frag); |
---|
| 1181 | + if (!size) |
---|
| 1182 | + continue; |
---|
| 1183 | + |
---|
| 1184 | + while (size > HNS3_MAX_BD_SIZE) { |
---|
| 1185 | + bd_size[bd_num++] = HNS3_MAX_BD_SIZE; |
---|
| 1186 | + size -= HNS3_MAX_BD_SIZE; |
---|
| 1187 | + |
---|
| 1188 | + if (bd_num > HNS3_MAX_TSO_BD_NUM) |
---|
| 1189 | + return bd_num; |
---|
| 1190 | + } |
---|
| 1191 | + |
---|
| 1192 | + bd_size[bd_num++] = size; |
---|
| 1193 | + if (bd_num > HNS3_MAX_TSO_BD_NUM) |
---|
| 1194 | + return bd_num; |
---|
| 1195 | + } |
---|
| 1196 | + |
---|
| 1197 | + return bd_num; |
---|
1032 | 1198 | } |
---|
1033 | 1199 | |
---|
1034 | | -static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, |
---|
1035 | | - struct hns3_enet_ring *ring) |
---|
| 1200 | +static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size, |
---|
| 1201 | + u8 max_non_tso_bd_num, unsigned int bd_num, |
---|
| 1202 | + unsigned int recursion_level) |
---|
1036 | 1203 | { |
---|
1037 | | - struct sk_buff *skb = *out_skb; |
---|
1038 | | - int buf_num; |
---|
| 1204 | +#define HNS3_MAX_RECURSION_LEVEL 24 |
---|
1039 | 1205 | |
---|
1040 | | - /* No. of segments (plus a header) */ |
---|
1041 | | - buf_num = skb_shinfo(skb)->nr_frags + 1; |
---|
| 1206 | + struct sk_buff *frag_skb; |
---|
1042 | 1207 | |
---|
1043 | | - if (buf_num > ring_space(ring)) |
---|
1044 | | - return -EBUSY; |
---|
| 1208 | + /* If the total len is within the max bd limit */ |
---|
| 1209 | + if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level && |
---|
| 1210 | + !skb_has_frag_list(skb) && |
---|
| 1211 | + skb_shinfo(skb)->nr_frags < max_non_tso_bd_num)) |
---|
| 1212 | + return skb_shinfo(skb)->nr_frags + 1U; |
---|
1045 | 1213 | |
---|
1046 | | - *bnum = buf_num; |
---|
| 1214 | + if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL)) |
---|
| 1215 | + return UINT_MAX; |
---|
| 1216 | + |
---|
| 1217 | + bd_num = hns3_skb_bd_num(skb, bd_size, bd_num); |
---|
| 1218 | + |
---|
| 1219 | + if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM) |
---|
| 1220 | + return bd_num; |
---|
| 1221 | + |
---|
| 1222 | + skb_walk_frags(skb, frag_skb) { |
---|
| 1223 | + bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num, |
---|
| 1224 | + bd_num, recursion_level + 1); |
---|
| 1225 | + if (bd_num > HNS3_MAX_TSO_BD_NUM) |
---|
| 1226 | + return bd_num; |
---|
| 1227 | + } |
---|
| 1228 | + |
---|
| 1229 | + return bd_num; |
---|
| 1230 | +} |
---|
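The BD-counting logic above boils down to splitting each buffer into HNS3_MAX_BD_SIZE-sized descriptors and bailing out early once the TSO limit is crossed. A minimal userspace sketch of the same arithmetic; BD_MAX, TSO_MAX and count_bds are made-up stand-ins, not driver symbols:

    #include <stdio.h>

    #define BD_MAX  2048u   /* stand-in for HNS3_MAX_BD_SIZE */
    #define TSO_MAX 63u     /* stand-in for HNS3_MAX_TSO_BD_NUM */

    static unsigned int count_bds(const unsigned int *lens, int n)
    {
        unsigned int bd_num = 0;
        int i;

        for (i = 0; i < n; i++) {
            unsigned int size = lens[i];

            if (!size)
                continue;

            bd_num += size / BD_MAX;        /* one BD per full chunk */
            if (size % BD_MAX)
                bd_num++;                   /* plus one for the tail */

            if (bd_num > TSO_MAX)
                return bd_num;              /* caller linearizes or drops */
        }
        return bd_num;
    }

    int main(void)
    {
        unsigned int lens[] = { 1500, 4096, 100 };   /* head + two frags */

        printf("bd_num = %u\n", count_bds(lens, 3)); /* 1 + 2 + 1 = 4 */
        return 0;
    }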
| 1231 | + |
---|
| 1232 | +static unsigned int hns3_gso_hdr_len(struct sk_buff *skb) |
---|
| 1233 | +{ |
---|
| 1234 | + if (!skb->encapsulation) |
---|
| 1235 | + return skb_transport_offset(skb) + tcp_hdrlen(skb); |
---|
| 1236 | + |
---|
| 1237 | + return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); |
---|
| 1238 | +} |
---|
| 1239 | + |
---|
| 1240 | +/* HW requires the data in every run of max_non_tso_bd_num continuous
---|
| 1241 | + * buffers to be larger than MSS. We simplify this by ensuring that
---|
| 1242 | + * skb_headlen plus the first continuous max_non_tso_bd_num - 1 frags
---|
| 1243 | + * are larger than gso header len + mss, and that every following run of
---|
| 1244 | + * max_non_tso_bd_num - 1 frags, except the last one, is larger than MSS.
---|
| 1245 | + */ |
---|
| 1246 | +static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size, |
---|
| 1247 | + unsigned int bd_num, u8 max_non_tso_bd_num) |
---|
| 1248 | +{ |
---|
| 1249 | + unsigned int tot_len = 0; |
---|
| 1250 | + int i; |
---|
| 1251 | + |
---|
| 1252 | + for (i = 0; i < max_non_tso_bd_num - 1U; i++) |
---|
| 1253 | + tot_len += bd_size[i]; |
---|
| 1254 | + |
---|
| 1255 | + /* ensure the first max_non_tso_bd_num frags are greater than
---|
| 1256 | + * mss + header |
---|
| 1257 | + */ |
---|
| 1258 | + if (tot_len + bd_size[max_non_tso_bd_num - 1U] < |
---|
| 1259 | + skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) |
---|
| 1260 | + return true; |
---|
| 1261 | + |
---|
| 1262 | + /* ensure every run of max_non_tso_bd_num - 1 continuous buffers
---|
| 1263 | + * is greater than mss, except the last one.
---|
| 1264 | + */ |
---|
| 1265 | + for (i = 0; i < bd_num - max_non_tso_bd_num; i++) { |
---|
| 1266 | + tot_len -= bd_size[i]; |
---|
| 1267 | + tot_len += bd_size[i + max_non_tso_bd_num - 1U]; |
---|
| 1268 | + |
---|
| 1269 | + if (tot_len < skb_shinfo(skb)->gso_size) |
---|
| 1270 | + return true; |
---|
| 1271 | + } |
---|
| 1272 | + |
---|
| 1273 | + return false; |
---|
| 1274 | +} |
---|
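To see what the sliding-window check above guards, here is a rough userspace model: the first max_non_tso_bd_num BDs must cover the GSO headers plus one MSS, and every later run of max_non_tso_bd_num - 1 BDs, the last one excepted, must cover an MSS. need_linearize and all values are illustrative, not driver code:

    #include <stdbool.h>
    #include <stdio.h>

    /* w stands in for max_non_tso_bd_num - 1 */
    static bool need_linearize(const unsigned int *bd, unsigned int bd_num,
                               unsigned int w, unsigned int mss,
                               unsigned int hdr_len)
    {
        unsigned int tot = 0;
        unsigned int i;

        for (i = 0; i < w; i++)
            tot += bd[i];

        /* first w + 1 BDs must hold the headers plus one full MSS */
        if (tot + bd[w] < mss + hdr_len)
            return true;

        /* each later run of w BDs must hold an MSS; the last is skipped */
        for (i = 0; i + w + 1 < bd_num; i++) {
            tot -= bd[i];
            tot += bd[i + w];
            if (tot < mss)
                return true;
        }
        return false;
    }

    int main(void)
    {
        unsigned int bd[] = { 200, 60, 60, 60, 60, 60, 60, 60 };

        /* eight small BDs cannot hold 54B of headers plus a 1400B MSS */
        printf("%s\n", need_linearize(bd, 8, 7, 1400, 54) ?
               "linearize" : "ok");
        return 0;
    }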
| 1275 | + |
---|
| 1276 | +void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) |
---|
| 1277 | +{ |
---|
| 1278 | + int i; |
---|
| 1279 | + |
---|
| 1280 | + for (i = 0; i < MAX_SKB_FRAGS; i++) |
---|
| 1281 | + size[i] = skb_frag_size(&shinfo->frags[i]); |
---|
| 1282 | +} |
---|
| 1283 | + |
---|
| 1284 | +static int hns3_skb_linearize(struct hns3_enet_ring *ring, |
---|
| 1285 | + struct sk_buff *skb, |
---|
| 1286 | + unsigned int bd_num) |
---|
| 1287 | +{ |
---|
| 1288 | + /* 'bd_num == UINT_MAX' means the skb's fraglist has a
---|
| 1289 | + * recursion level over HNS3_MAX_RECURSION_LEVEL.
---|
| 1290 | + */ |
---|
| 1291 | + if (bd_num == UINT_MAX) { |
---|
| 1292 | + u64_stats_update_begin(&ring->syncp); |
---|
| 1293 | + ring->stats.over_max_recursion++; |
---|
| 1294 | + u64_stats_update_end(&ring->syncp); |
---|
| 1295 | + return -ENOMEM; |
---|
| 1296 | + } |
---|
| 1297 | + |
---|
| 1298 | + /* The skb->len has exceeded the hw limitation, linearization |
---|
| 1299 | + * will not help. |
---|
| 1300 | + */ |
---|
| 1301 | + if (skb->len > HNS3_MAX_TSO_SIZE || |
---|
| 1302 | + (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { |
---|
| 1303 | + u64_stats_update_begin(&ring->syncp); |
---|
| 1304 | + ring->stats.hw_limitation++; |
---|
| 1305 | + u64_stats_update_end(&ring->syncp); |
---|
| 1306 | + return -ENOMEM; |
---|
| 1307 | + } |
---|
| 1308 | + |
---|
| 1309 | + if (__skb_linearize(skb)) { |
---|
| 1310 | + u64_stats_update_begin(&ring->syncp); |
---|
| 1311 | + ring->stats.sw_err_cnt++; |
---|
| 1312 | + u64_stats_update_end(&ring->syncp); |
---|
| 1313 | + return -ENOMEM; |
---|
| 1314 | + } |
---|
1047 | 1315 | |
---|
1048 | 1316 | return 0; |
---|
1049 | 1317 | } |
---|
1050 | 1318 | |
---|
1051 | | -static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) |
---|
| 1319 | +static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, |
---|
| 1320 | + struct net_device *netdev, |
---|
| 1321 | + struct sk_buff *skb) |
---|
| 1322 | +{ |
---|
| 1323 | + struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
| 1324 | + u8 max_non_tso_bd_num = priv->max_non_tso_bd_num; |
---|
| 1325 | + unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; |
---|
| 1326 | + unsigned int bd_num; |
---|
| 1327 | + |
---|
| 1328 | + bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0); |
---|
| 1329 | + if (unlikely(bd_num > max_non_tso_bd_num)) { |
---|
| 1330 | + if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && |
---|
| 1331 | + !hns3_skb_need_linearized(skb, bd_size, bd_num, |
---|
| 1332 | + max_non_tso_bd_num)) { |
---|
| 1333 | + trace_hns3_over_max_bd(skb); |
---|
| 1334 | + goto out; |
---|
| 1335 | + } |
---|
| 1336 | + |
---|
| 1337 | + if (hns3_skb_linearize(ring, skb, bd_num)) |
---|
| 1338 | + return -ENOMEM; |
---|
| 1339 | + |
---|
| 1340 | + bd_num = hns3_tx_bd_count(skb->len); |
---|
| 1341 | + |
---|
| 1342 | + u64_stats_update_begin(&ring->syncp); |
---|
| 1343 | + ring->stats.tx_copy++; |
---|
| 1344 | + u64_stats_update_end(&ring->syncp); |
---|
| 1345 | + } |
---|
| 1346 | + |
---|
| 1347 | +out: |
---|
| 1348 | + if (likely(ring_space(ring) >= bd_num)) |
---|
| 1349 | + return bd_num; |
---|
| 1350 | + |
---|
| 1351 | + netif_stop_subqueue(netdev, ring->queue_index); |
---|
| 1352 | + smp_mb(); /* Memory barrier before checking ring_space */ |
---|
| 1353 | + |
---|
| 1354 | + /* Start queue in case hns3_clean_tx_ring has just made room |
---|
| 1355 | + * available and has not yet seen the stopped state set by
---|
| 1356 | + * netif_stop_subqueue above.
---|
| 1357 | + */ |
---|
| 1358 | + if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) && |
---|
| 1359 | + !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { |
---|
| 1360 | + netif_start_subqueue(netdev, ring->queue_index); |
---|
| 1361 | + return bd_num; |
---|
| 1362 | + } |
---|
| 1363 | + |
---|
| 1364 | + u64_stats_update_begin(&ring->syncp); |
---|
| 1365 | + ring->stats.tx_busy++; |
---|
| 1366 | + u64_stats_update_end(&ring->syncp); |
---|
| 1367 | + |
---|
| 1368 | + return -EBUSY; |
---|
| 1369 | +} |
---|
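The stop/restart dance in hns3_nic_maybe_stop_tx() is the standard lock-free queue-stop pattern: publish the stopped state, issue a full barrier, then re-read the free space so a concurrent cleaner cannot slip a wakeup in between. A rough userspace model with C11 atomics standing in for netif_stop_subqueue()/smp_mb(); struct txq and both helpers are invented for illustration:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct txq {
        atomic_bool stopped;
        atomic_int  space;    /* free descriptors */
    };

    /* producer: returns true when it may post 'need' descriptors */
    static bool tx_maybe_stop(struct txq *q, int need)
    {
        if (atomic_load(&q->space) >= need)
            return true;

        atomic_store(&q->stopped, true);
        /* full barrier, like smp_mb(): the cleaner must either see
         * "stopped" or we must see the space it just freed */
        atomic_thread_fence(memory_order_seq_cst);

        if (atomic_load(&q->space) >= need) {
            atomic_store(&q->stopped, false);   /* lost the race: restart */
            return true;
        }
        return false;   /* genuinely full: report busy */
    }

    /* cleaner: free space first, then wake the queue if it stopped */
    static void tx_clean(struct txq *q, int freed)
    {
        atomic_fetch_add(&q->space, freed);
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load(&q->stopped))
            atomic_store(&q->stopped, false);
    }

    int main(void)
    {
        struct txq q;

        atomic_init(&q.stopped, false);
        atomic_init(&q.space, 1);

        if (!tx_maybe_stop(&q, 4))   /* ring too full: queue stops */
            tx_clean(&q, 8);         /* cleaner frees space and wakes */
        return !tx_maybe_stop(&q, 4);   /* succeeds now: exit code 0 */
    }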
| 1370 | + |
---|
| 1371 | +static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) |
---|
1052 | 1372 | { |
---|
1053 | 1373 | struct device *dev = ring_to_dev(ring); |
---|
1054 | 1374 | unsigned int i; |
---|
1055 | 1375 | |
---|
1056 | 1376 | for (i = 0; i < ring->desc_num; i++) { |
---|
| 1377 | + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; |
---|
| 1378 | + |
---|
| 1379 | + memset(desc, 0, sizeof(*desc)); |
---|
| 1380 | + |
---|
1057 | 1381 | /* check if this is where we started */ |
---|
1058 | 1382 | if (ring->next_to_use == next_to_use_orig) |
---|
1059 | 1383 | break; |
---|
1060 | 1384 | |
---|
| 1385 | + /* rollback one */ |
---|
| 1386 | + ring_ptr_move_bw(ring, next_to_use); |
---|
| 1387 | + |
---|
| 1388 | + if (!ring->desc_cb[ring->next_to_use].dma) |
---|
| 1389 | + continue; |
---|
| 1390 | + |
---|
1061 | 1391 | /* unmap the descriptor dma address */ |
---|
1062 | | - if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) |
---|
| 1392 | + if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB || |
---|
| 1393 | + ring->desc_cb[ring->next_to_use].type == |
---|
| 1394 | + DESC_TYPE_FRAGLIST_SKB) |
---|
1063 | 1395 | dma_unmap_single(dev, |
---|
1064 | 1396 | ring->desc_cb[ring->next_to_use].dma, |
---|
1065 | 1397 | ring->desc_cb[ring->next_to_use].length, |
---|
1066 | 1398 | DMA_TO_DEVICE); |
---|
1067 | | - else |
---|
| 1399 | + else if (ring->desc_cb[ring->next_to_use].length) |
---|
1068 | 1400 | dma_unmap_page(dev, |
---|
1069 | 1401 | ring->desc_cb[ring->next_to_use].dma, |
---|
1070 | 1402 | ring->desc_cb[ring->next_to_use].length, |
---|
1071 | 1403 | DMA_TO_DEVICE); |
---|
1072 | 1404 | |
---|
1073 | | - /* rollback one */ |
---|
1074 | | - ring_ptr_move_bw(ring, next_to_use); |
---|
| 1405 | + ring->desc_cb[ring->next_to_use].length = 0; |
---|
| 1406 | + ring->desc_cb[ring->next_to_use].dma = 0; |
---|
| 1407 | + ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN; |
---|
1075 | 1408 | } |
---|
| 1409 | +} |
---|
| 1410 | + |
---|
| 1411 | +static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, |
---|
| 1412 | + struct sk_buff *skb, enum hns_desc_type type) |
---|
| 1413 | +{ |
---|
| 1414 | + unsigned int size = skb_headlen(skb); |
---|
| 1415 | + struct sk_buff *frag_skb; |
---|
| 1416 | + int i, ret, bd_num = 0; |
---|
| 1417 | + |
---|
| 1418 | + if (size) { |
---|
| 1419 | + ret = hns3_fill_desc(ring, skb, size, type); |
---|
| 1420 | + if (unlikely(ret < 0)) |
---|
| 1421 | + return ret; |
---|
| 1422 | + |
---|
| 1423 | + bd_num += ret; |
---|
| 1424 | + } |
---|
| 1425 | + |
---|
| 1426 | + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
---|
| 1427 | + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
---|
| 1428 | + |
---|
| 1429 | + size = skb_frag_size(frag); |
---|
| 1430 | + if (!size) |
---|
| 1431 | + continue; |
---|
| 1432 | + |
---|
| 1433 | + ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE); |
---|
| 1434 | + if (unlikely(ret < 0)) |
---|
| 1435 | + return ret; |
---|
| 1436 | + |
---|
| 1437 | + bd_num += ret; |
---|
| 1438 | + } |
---|
| 1439 | + |
---|
| 1440 | + skb_walk_frags(skb, frag_skb) { |
---|
| 1441 | + ret = hns3_fill_skb_to_desc(ring, frag_skb, |
---|
| 1442 | + DESC_TYPE_FRAGLIST_SKB); |
---|
| 1443 | + if (unlikely(ret < 0)) |
---|
| 1444 | + return ret; |
---|
| 1445 | + |
---|
| 1446 | + bd_num += ret; |
---|
| 1447 | + } |
---|
| 1448 | + |
---|
| 1449 | + return bd_num; |
---|
| 1450 | +} |
---|
| 1451 | + |
---|
| 1452 | +static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, |
---|
| 1453 | + bool doorbell) |
---|
| 1454 | +{ |
---|
| 1455 | + ring->pending_buf += num; |
---|
| 1456 | + |
---|
| 1457 | + if (!doorbell) { |
---|
| 1458 | + u64_stats_update_begin(&ring->syncp); |
---|
| 1459 | + ring->stats.tx_more++; |
---|
| 1460 | + u64_stats_update_end(&ring->syncp); |
---|
| 1461 | + return; |
---|
| 1462 | + } |
---|
| 1463 | + |
---|
| 1464 | + if (!ring->pending_buf) |
---|
| 1465 | + return; |
---|
| 1466 | + |
---|
| 1467 | + writel(ring->pending_buf, |
---|
| 1468 | + ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); |
---|
| 1469 | + ring->pending_buf = 0; |
---|
| 1470 | + WRITE_ONCE(ring->last_to_use, ring->next_to_use); |
---|
1076 | 1471 | } |
---|
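hns3_tx_doorbell() implements doorbell batching: while the stack reports more packets queued behind this one (netdev_xmit_more()), the BD count only accumulates in pending_buf, and a single register write later flushes the whole batch. A compact model of that behaviour; mmio_write() is a made-up stand-in for writel():

    #include <stdio.h>

    struct tx_ring {
        int pending;
    };

    static void mmio_write(int val)
    {
        printf("doorbell: %d BDs\n", val);   /* stand-in for writel() */
    }

    static void tx_doorbell(struct tx_ring *r, int num, int kick)
    {
        r->pending += num;

        if (!kick || !r->pending)
            return;   /* defer: hardware is notified later in one shot */

        mmio_write(r->pending);
        r->pending = 0;
    }

    int main(void)
    {
        struct tx_ring r = { 0 };

        tx_doorbell(&r, 3, 0);   /* xmit_more: defer */
        tx_doorbell(&r, 2, 0);   /* xmit_more: defer */
        tx_doorbell(&r, 4, 1);   /* last packet: ring once with 9 BDs */
        return 0;
    }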
1077 | 1472 | |
---|
1078 | 1473 | netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) |
---|
1079 | 1474 | { |
---|
1080 | 1475 | struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
1081 | | - struct hns3_nic_ring_data *ring_data = |
---|
1082 | | - &tx_ring_data(priv, skb->queue_mapping); |
---|
1083 | | - struct hns3_enet_ring *ring = ring_data->ring; |
---|
1084 | | - struct device *dev = priv->dev; |
---|
| 1476 | + struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; |
---|
1085 | 1477 | struct netdev_queue *dev_queue; |
---|
1086 | | - struct skb_frag_struct *frag; |
---|
1087 | | - int next_to_use_head; |
---|
1088 | | - int next_to_use_frag; |
---|
1089 | | - dma_addr_t dma; |
---|
1090 | | - int buf_num; |
---|
1091 | | - int seg_num; |
---|
1092 | | - int size; |
---|
| 1478 | + int pre_ntu, next_to_use_head; |
---|
| 1479 | + bool doorbell; |
---|
1093 | 1480 | int ret; |
---|
1094 | | - int i; |
---|
| 1481 | + |
---|
| 1482 | + /* Hardware cannot handle frames shorter than 33 bytes, so pad */
---|
| 1483 | + if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { |
---|
| 1484 | + hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); |
---|
| 1485 | + return NETDEV_TX_OK; |
---|
| 1486 | + } |
---|
1095 | 1487 | |
---|
1096 | 1488 | /* Prefetch the data used later */ |
---|
1097 | 1489 | prefetch(skb->data); |
---|
1098 | 1490 | |
---|
1099 | | - switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { |
---|
1100 | | - case -EBUSY: |
---|
1101 | | - u64_stats_update_begin(&ring->syncp); |
---|
1102 | | - ring->stats.tx_busy++; |
---|
1103 | | - u64_stats_update_end(&ring->syncp); |
---|
| 1491 | + ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); |
---|
| 1492 | + if (unlikely(ret <= 0)) { |
---|
| 1493 | + if (ret == -EBUSY) { |
---|
| 1494 | + hns3_tx_doorbell(ring, 0, true); |
---|
| 1495 | + return NETDEV_TX_BUSY; |
---|
| 1496 | + } |
---|
1104 | 1497 | |
---|
1105 | | - goto out_net_tx_busy; |
---|
1106 | | - case -ENOMEM: |
---|
1107 | | - u64_stats_update_begin(&ring->syncp); |
---|
1108 | | - ring->stats.sw_err_cnt++; |
---|
1109 | | - u64_stats_update_end(&ring->syncp); |
---|
1110 | | - netdev_err(netdev, "no memory to xmit!\n"); |
---|
1111 | | - |
---|
| 1498 | + hns3_rl_err(netdev, "xmit error: %d!\n", ret); |
---|
1112 | 1499 | goto out_err_tx_ok; |
---|
1113 | | - default: |
---|
1114 | | - break; |
---|
1115 | 1500 | } |
---|
1116 | | - |
---|
1117 | | - /* No. of segments (plus a header) */ |
---|
1118 | | - seg_num = skb_shinfo(skb)->nr_frags + 1; |
---|
1119 | | - /* Fill the first part */ |
---|
1120 | | - size = skb_headlen(skb); |
---|
1121 | 1501 | |
---|
1122 | 1502 | next_to_use_head = ring->next_to_use; |
---|
1123 | 1503 | |
---|
1124 | | - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); |
---|
1125 | | - if (dma_mapping_error(dev, dma)) { |
---|
1126 | | - netdev_err(netdev, "TX head DMA map failed\n"); |
---|
1127 | | - ring->stats.sw_err_cnt++; |
---|
1128 | | - goto out_err_tx_ok; |
---|
1129 | | - } |
---|
| 1504 | + ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]); |
---|
| 1505 | + if (unlikely(ret < 0)) |
---|
| 1506 | + goto fill_err; |
---|
1130 | 1507 | |
---|
1131 | | - ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, |
---|
1132 | | - DESC_TYPE_SKB); |
---|
1133 | | - if (ret) |
---|
1134 | | - goto head_dma_map_err; |
---|
| 1508 | + /* 'ret < 0' means filling error, 'ret == 0' means skb->len is |
---|
| 1509 | + * zero, which is unlikely, and 'ret > 0' is the number of tx
---|
| 1510 | + * descriptors that need to be notified to the hw.
---|
| 1511 | + */ |
---|
| 1512 | + ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); |
---|
| 1513 | + if (unlikely(ret <= 0)) |
---|
| 1514 | + goto fill_err; |
---|
1135 | 1515 | |
---|
1136 | | - next_to_use_frag = ring->next_to_use; |
---|
1137 | | - /* Fill the fragments */ |
---|
1138 | | - for (i = 1; i < seg_num; i++) { |
---|
1139 | | - frag = &skb_shinfo(skb)->frags[i - 1]; |
---|
1140 | | - size = skb_frag_size(frag); |
---|
1141 | | - dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); |
---|
1142 | | - if (dma_mapping_error(dev, dma)) { |
---|
1143 | | - netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); |
---|
1144 | | - ring->stats.sw_err_cnt++; |
---|
1145 | | - goto frag_dma_map_err; |
---|
1146 | | - } |
---|
1147 | | - ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, |
---|
1148 | | - seg_num - 1 == i ? 1 : 0, |
---|
1149 | | - DESC_TYPE_PAGE); |
---|
1150 | | - |
---|
1151 | | - if (ret) |
---|
1152 | | - goto frag_dma_map_err; |
---|
1153 | | - } |
---|
| 1516 | + pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : |
---|
| 1517 | + (ring->desc_num - 1); |
---|
| 1518 | + ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= |
---|
| 1519 | + cpu_to_le16(BIT(HNS3_TXD_FE_B)); |
---|
| 1520 | + trace_hns3_tx_desc(ring, pre_ntu); |
---|
1154 | 1521 | |
---|
1155 | 1522 | /* Complete translate all packets */ |
---|
1156 | | - dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index); |
---|
1157 | | - netdev_tx_sent_queue(dev_queue, skb->len); |
---|
1158 | | - |
---|
1159 | | - wmb(); /* Commit all data before submit */ |
---|
1160 | | - |
---|
1161 | | - hnae3_queue_xmit(ring->tqp, buf_num); |
---|
| 1523 | + dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); |
---|
| 1524 | + doorbell = __netdev_tx_sent_queue(dev_queue, skb->len, |
---|
| 1525 | + netdev_xmit_more()); |
---|
| 1526 | + hns3_tx_doorbell(ring, ret, doorbell); |
---|
1162 | 1527 | |
---|
1163 | 1528 | return NETDEV_TX_OK; |
---|
1164 | 1529 | |
---|
1165 | | -frag_dma_map_err: |
---|
1166 | | - hns_nic_dma_unmap(ring, next_to_use_frag); |
---|
1167 | | - |
---|
1168 | | -head_dma_map_err: |
---|
1169 | | - hns_nic_dma_unmap(ring, next_to_use_head); |
---|
| 1530 | +fill_err: |
---|
| 1531 | + hns3_clear_desc(ring, next_to_use_head); |
---|
1170 | 1532 | |
---|
1171 | 1533 | out_err_tx_ok: |
---|
1172 | 1534 | dev_kfree_skb_any(skb); |
---|
| 1535 | + hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); |
---|
1173 | 1536 | return NETDEV_TX_OK; |
---|
1174 | | - |
---|
1175 | | -out_net_tx_busy: |
---|
1176 | | - netif_stop_subqueue(netdev, ring_data->queue_index); |
---|
1177 | | - smp_mb(); /* Commit all data before submit */ |
---|
1178 | | - |
---|
1179 | | - return NETDEV_TX_BUSY; |
---|
1180 | 1537 | } |
---|
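The pre_ntu computation in the transmit path above is ordinary ring-index wrap-around: the last descriptor filled sits one slot behind next_to_use, modulo the ring size. A branch-free equivalent, purely illustrative:

    #include <stdio.h>

    /* equivalent of: pre_ntu = ntu ? ntu - 1 : desc_num - 1 */
    static int ring_prev(int ntu, int desc_num)
    {
        return (ntu + desc_num - 1) % desc_num;
    }

    int main(void)
    {
        printf("%d %d\n", ring_prev(0, 512), ring_prev(5, 512)); /* 511 4 */
        return 0;
    }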
1181 | 1538 | |
---|
1182 | 1539 | static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) |
---|
.. | .. |
---|
1194 | 1551 | return 0; |
---|
1195 | 1552 | } |
---|
1196 | 1553 | |
---|
| 1554 | + /* For a VF device, if there is a perm_addr, then the user will not
---|
| 1555 | + * be allowed to change the address. |
---|
| 1556 | + */ |
---|
| 1557 | + if (!hns3_is_phys_func(h->pdev) && |
---|
| 1558 | + !is_zero_ether_addr(netdev->perm_addr)) { |
---|
| 1559 | + netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allowed\n",
---|
| 1560 | + netdev->perm_addr, mac_addr->sa_data); |
---|
| 1561 | + return -EPERM; |
---|
| 1562 | + } |
---|
| 1563 | + |
---|
1197 | 1564 | ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); |
---|
1198 | 1565 | if (ret) { |
---|
1199 | 1566 | netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); |
---|
.. | .. |
---|
1205 | 1572 | return 0; |
---|
1206 | 1573 | } |
---|
1207 | 1574 | |
---|
| 1575 | +static int hns3_nic_do_ioctl(struct net_device *netdev, |
---|
| 1576 | + struct ifreq *ifr, int cmd) |
---|
| 1577 | +{ |
---|
| 1578 | + struct hnae3_handle *h = hns3_get_handle(netdev); |
---|
| 1579 | + |
---|
| 1580 | + if (!netif_running(netdev)) |
---|
| 1581 | + return -EINVAL; |
---|
| 1582 | + |
---|
| 1583 | + if (!h->ae_algo->ops->do_ioctl) |
---|
| 1584 | + return -EOPNOTSUPP; |
---|
| 1585 | + |
---|
| 1586 | + return h->ae_algo->ops->do_ioctl(h, ifr, cmd); |
---|
| 1587 | +} |
---|
| 1588 | + |
---|
1208 | 1589 | static int hns3_nic_set_features(struct net_device *netdev, |
---|
1209 | 1590 | netdev_features_t features) |
---|
1210 | 1591 | { |
---|
1211 | 1592 | netdev_features_t changed = netdev->features ^ features; |
---|
1212 | 1593 | struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
1213 | 1594 | struct hnae3_handle *h = priv->ae_handle; |
---|
| 1595 | + bool enable; |
---|
1214 | 1596 | int ret; |
---|
1215 | 1597 | |
---|
1216 | | - if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { |
---|
1217 | | - if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { |
---|
1218 | | - priv->ops.fill_desc = hns3_fill_desc_tso; |
---|
1219 | | - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; |
---|
1220 | | - } else { |
---|
1221 | | - priv->ops.fill_desc = hns3_fill_desc; |
---|
1222 | | - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; |
---|
1223 | | - } |
---|
1224 | | - } |
---|
1225 | | - |
---|
1226 | | - if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && |
---|
1227 | | - h->ae_algo->ops->enable_vlan_filter) { |
---|
1228 | | - if (features & NETIF_F_HW_VLAN_CTAG_FILTER) |
---|
1229 | | - h->ae_algo->ops->enable_vlan_filter(h, true); |
---|
1230 | | - else |
---|
1231 | | - h->ae_algo->ops->enable_vlan_filter(h, false); |
---|
1232 | | - } |
---|
1233 | | - |
---|
1234 | | - if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && |
---|
1235 | | - h->ae_algo->ops->enable_hw_strip_rxvtag) { |
---|
1236 | | - if (features & NETIF_F_HW_VLAN_CTAG_RX) |
---|
1237 | | - ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true); |
---|
1238 | | - else |
---|
1239 | | - ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false); |
---|
1240 | | - |
---|
| 1598 | + if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { |
---|
| 1599 | + enable = !!(features & NETIF_F_GRO_HW); |
---|
| 1600 | + ret = h->ae_algo->ops->set_gro_en(h, enable); |
---|
1241 | 1601 | if (ret) |
---|
1242 | 1602 | return ret; |
---|
1243 | 1603 | } |
---|
1244 | 1604 | |
---|
| 1605 | + if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && |
---|
| 1606 | + h->ae_algo->ops->enable_hw_strip_rxvtag) { |
---|
| 1607 | + enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); |
---|
| 1608 | + ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); |
---|
| 1609 | + if (ret) |
---|
| 1610 | + return ret; |
---|
| 1611 | + } |
---|
| 1612 | + |
---|
| 1613 | + if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { |
---|
| 1614 | + enable = !!(features & NETIF_F_NTUPLE); |
---|
| 1615 | + h->ae_algo->ops->enable_fd(h, enable); |
---|
| 1616 | + } |
---|
| 1617 | + |
---|
1245 | 1618 | netdev->features = features; |
---|
1246 | 1619 | return 0; |
---|
| 1620 | +} |
---|
| 1621 | + |
---|
| 1622 | +static netdev_features_t hns3_features_check(struct sk_buff *skb, |
---|
| 1623 | + struct net_device *dev, |
---|
| 1624 | + netdev_features_t features) |
---|
| 1625 | +{ |
---|
| 1626 | +#define HNS3_MAX_HDR_LEN 480U |
---|
| 1627 | +#define HNS3_MAX_L4_HDR_LEN 60U |
---|
| 1628 | + |
---|
| 1629 | + size_t len; |
---|
| 1630 | + |
---|
| 1631 | + if (skb->ip_summed != CHECKSUM_PARTIAL) |
---|
| 1632 | + return features; |
---|
| 1633 | + |
---|
| 1634 | + if (skb->encapsulation) |
---|
| 1635 | + len = skb_inner_transport_header(skb) - skb->data; |
---|
| 1636 | + else |
---|
| 1637 | + len = skb_transport_header(skb) - skb->data; |
---|
| 1638 | + |
---|
| 1639 | + /* Assume L4 is 60 bytes, as TCP is the only protocol with a
---|
| 1640 | + * flexible header length, and its max len is 60 bytes.
---|
| 1641 | + */ |
---|
| 1642 | + len += HNS3_MAX_L4_HDR_LEN; |
---|
| 1643 | + |
---|
| 1644 | + /* Hardware only supports checksum on the skb with a max header |
---|
| 1645 | + * len of 480 bytes. |
---|
| 1646 | + */ |
---|
| 1647 | + if (len > HNS3_MAX_HDR_LEN) |
---|
| 1648 | + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
---|
| 1649 | + |
---|
| 1650 | + return features; |
---|
1247 | 1651 | } |
---|
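For a feel of the 480-byte budget that hns3_features_check() enforces, a quick worked example with a typical VXLAN stack; the numbers are textbook header sizes, not values from this driver:

    #include <stdio.h>

    int main(void)
    {
        /* outer Ethernet 14 + outer IPv4 20 + UDP 8 + VXLAN 8 +
         * inner Ethernet 14 = offset of the inner transport header
         */
        unsigned int to_inner_l4 = 14 + 20 + 8 + 8 + 14;
        unsigned int worst = to_inner_l4 + 60;   /* HNS3_MAX_L4_HDR_LEN */

        printf("hdr len %u <= 480: %s\n", worst,
               worst <= 480 ? "keep offloads" : "strip offloads");
        return 0;
    }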
1248 | 1652 | |
---|
1249 | 1653 | static void hns3_nic_get_stats64(struct net_device *netdev, |
---|
.. | .. |
---|
1253 | 1657 | int queue_num = priv->ae_handle->kinfo.num_tqps; |
---|
1254 | 1658 | struct hnae3_handle *handle = priv->ae_handle; |
---|
1255 | 1659 | struct hns3_enet_ring *ring; |
---|
| 1660 | + u64 rx_length_errors = 0; |
---|
| 1661 | + u64 rx_crc_errors = 0; |
---|
| 1662 | + u64 rx_multicast = 0; |
---|
1256 | 1663 | unsigned int start; |
---|
| 1664 | + u64 tx_errors = 0; |
---|
| 1665 | + u64 rx_errors = 0; |
---|
1257 | 1666 | unsigned int idx; |
---|
1258 | 1667 | u64 tx_bytes = 0; |
---|
1259 | 1668 | u64 rx_bytes = 0; |
---|
.. | .. |
---|
1269 | 1678 | |
---|
1270 | 1679 | for (idx = 0; idx < queue_num; idx++) { |
---|
1271 | 1680 | /* fetch the tx stats */ |
---|
1272 | | - ring = priv->ring_data[idx].ring; |
---|
| 1681 | + ring = &priv->ring[idx]; |
---|
1273 | 1682 | do { |
---|
1274 | 1683 | start = u64_stats_fetch_begin_irq(&ring->syncp); |
---|
1275 | 1684 | tx_bytes += ring->stats.tx_bytes; |
---|
1276 | 1685 | tx_pkts += ring->stats.tx_pkts; |
---|
1277 | | - tx_drop += ring->stats.tx_busy; |
---|
1278 | 1686 | tx_drop += ring->stats.sw_err_cnt; |
---|
| 1687 | + tx_drop += ring->stats.tx_vlan_err; |
---|
| 1688 | + tx_drop += ring->stats.tx_l4_proto_err; |
---|
| 1689 | + tx_drop += ring->stats.tx_l2l3l4_err; |
---|
| 1690 | + tx_drop += ring->stats.tx_tso_err; |
---|
| 1691 | + tx_drop += ring->stats.over_max_recursion; |
---|
| 1692 | + tx_drop += ring->stats.hw_limitation; |
---|
| 1693 | + tx_errors += ring->stats.sw_err_cnt; |
---|
| 1694 | + tx_errors += ring->stats.tx_vlan_err; |
---|
| 1695 | + tx_errors += ring->stats.tx_l4_proto_err; |
---|
| 1696 | + tx_errors += ring->stats.tx_l2l3l4_err; |
---|
| 1697 | + tx_errors += ring->stats.tx_tso_err; |
---|
| 1698 | + tx_errors += ring->stats.over_max_recursion; |
---|
| 1699 | + tx_errors += ring->stats.hw_limitation; |
---|
1279 | 1700 | } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); |
---|
1280 | 1701 | |
---|
1281 | 1702 | /* fetch the rx stats */ |
---|
1282 | | - ring = priv->ring_data[idx + queue_num].ring; |
---|
| 1703 | + ring = &priv->ring[idx + queue_num]; |
---|
1283 | 1704 | do { |
---|
1284 | 1705 | start = u64_stats_fetch_begin_irq(&ring->syncp); |
---|
1285 | 1706 | rx_bytes += ring->stats.rx_bytes; |
---|
1286 | 1707 | rx_pkts += ring->stats.rx_pkts; |
---|
1287 | | - rx_drop += ring->stats.non_vld_descs; |
---|
1288 | | - rx_drop += ring->stats.err_pkt_len; |
---|
1289 | 1708 | rx_drop += ring->stats.l2_err; |
---|
| 1709 | + rx_errors += ring->stats.l2_err; |
---|
| 1710 | + rx_errors += ring->stats.l3l4_csum_err; |
---|
| 1711 | + rx_crc_errors += ring->stats.l2_err; |
---|
| 1712 | + rx_multicast += ring->stats.rx_multicast; |
---|
| 1713 | + rx_length_errors += ring->stats.err_pkt_len; |
---|
1290 | 1714 | } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); |
---|
1291 | 1715 | } |
---|
1292 | 1716 | |
---|
.. | .. |
---|
1295 | 1719 | stats->rx_bytes = rx_bytes; |
---|
1296 | 1720 | stats->rx_packets = rx_pkts; |
---|
1297 | 1721 | |
---|
1298 | | - stats->rx_errors = netdev->stats.rx_errors; |
---|
1299 | | - stats->multicast = netdev->stats.multicast; |
---|
1300 | | - stats->rx_length_errors = netdev->stats.rx_length_errors; |
---|
1301 | | - stats->rx_crc_errors = netdev->stats.rx_crc_errors; |
---|
| 1722 | + stats->rx_errors = rx_errors; |
---|
| 1723 | + stats->multicast = rx_multicast; |
---|
| 1724 | + stats->rx_length_errors = rx_length_errors; |
---|
| 1725 | + stats->rx_crc_errors = rx_crc_errors; |
---|
1302 | 1726 | stats->rx_missed_errors = netdev->stats.rx_missed_errors; |
---|
1303 | 1727 | |
---|
1304 | | - stats->tx_errors = netdev->stats.tx_errors; |
---|
1305 | | - stats->rx_dropped = rx_drop + netdev->stats.rx_dropped; |
---|
1306 | | - stats->tx_dropped = tx_drop + netdev->stats.tx_dropped; |
---|
| 1728 | + stats->tx_errors = tx_errors; |
---|
| 1729 | + stats->rx_dropped = rx_drop; |
---|
| 1730 | + stats->tx_dropped = tx_drop; |
---|
1307 | 1731 | stats->collisions = netdev->stats.collisions; |
---|
1308 | 1732 | stats->rx_over_errors = netdev->stats.rx_over_errors; |
---|
1309 | 1733 | stats->rx_frame_errors = netdev->stats.rx_frame_errors; |
---|
.. | .. |
---|
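The fetch_begin/fetch_retry loops above are seqcount reads: the reader retries whenever a writer was active, so the 64-bit counters come out consistent even on 32-bit machines. A compact single-writer model in userspace C; it glosses over the exact fencing of the kernel's u64_stats API, and every name is invented:

    #include <stdatomic.h>

    struct stats {
        atomic_uint seq;
        unsigned long long tx_bytes, tx_pkts;
    };

    /* single writer: bump seq to odd before writing, even after */
    static void stats_update(struct stats *s, unsigned int bytes)
    {
        atomic_fetch_add(&s->seq, 1);
        s->tx_bytes += bytes;
        s->tx_pkts++;
        atomic_fetch_add(&s->seq, 1);
    }

    /* reader: retry while a write was in flight or landed in between */
    static void stats_read(struct stats *s, unsigned long long *bytes,
                           unsigned long long *pkts)
    {
        unsigned int start;

        do {
            start = atomic_load(&s->seq);
            *bytes = s->tx_bytes;
            *pkts = s->tx_pkts;
        } while ((start & 1) || atomic_load(&s->seq) != start);
    }

    int main(void)
    {
        struct stats s;
        unsigned long long b, p;

        atomic_init(&s.seq, 0);
        s.tx_bytes = 0;
        s.tx_pkts = 0;

        stats_update(&s, 1500);
        stats_read(&s, &b, &p);
        return (int)(p - 1);   /* exits 0 after the expected one packet */
    }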
1320 | 1744 | static int hns3_setup_tc(struct net_device *netdev, void *type_data) |
---|
1321 | 1745 | { |
---|
1322 | 1746 | struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; |
---|
1323 | | - struct hnae3_handle *h = hns3_get_handle(netdev); |
---|
1324 | | - struct hnae3_knic_private_info *kinfo = &h->kinfo; |
---|
1325 | 1747 | u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; |
---|
| 1748 | + struct hnae3_knic_private_info *kinfo; |
---|
1326 | 1749 | u8 tc = mqprio_qopt->qopt.num_tc; |
---|
1327 | 1750 | u16 mode = mqprio_qopt->mode; |
---|
1328 | 1751 | u8 hw = mqprio_qopt->qopt.hw; |
---|
1329 | | - bool if_running; |
---|
1330 | | - int ret; |
---|
| 1752 | + struct hnae3_handle *h; |
---|
1331 | 1753 | |
---|
1332 | 1754 | if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && |
---|
1333 | 1755 | mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) |
---|
.. | .. |
---|
1339 | 1761 | if (!netdev) |
---|
1340 | 1762 | return -EINVAL; |
---|
1341 | 1763 | |
---|
1342 | | - if_running = netif_running(netdev); |
---|
1343 | | - if (if_running) { |
---|
1344 | | - hns3_nic_net_stop(netdev); |
---|
1345 | | - msleep(100); |
---|
1346 | | - } |
---|
| 1764 | + h = hns3_get_handle(netdev); |
---|
| 1765 | + kinfo = &h->kinfo; |
---|
1347 | 1766 | |
---|
1348 | | - ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? |
---|
1349 | | - kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; |
---|
1350 | | - if (ret) |
---|
1351 | | - goto out; |
---|
| 1767 | + netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); |
---|
1352 | 1768 | |
---|
1353 | | - ret = hns3_nic_set_real_num_queue(netdev); |
---|
1354 | | - |
---|
1355 | | -out: |
---|
1356 | | - if (if_running) |
---|
1357 | | - hns3_nic_net_open(netdev); |
---|
1358 | | - |
---|
1359 | | - return ret; |
---|
| 1769 | + return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? |
---|
| 1770 | + kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP; |
---|
1360 | 1771 | } |
---|
1361 | 1772 | |
---|
1362 | 1773 | static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, |
---|
.. | .. |
---|
1372 | 1783 | __be16 proto, u16 vid) |
---|
1373 | 1784 | { |
---|
1374 | 1785 | struct hnae3_handle *h = hns3_get_handle(netdev); |
---|
1375 | | - struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
1376 | 1786 | int ret = -EIO; |
---|
1377 | 1787 | |
---|
1378 | 1788 | if (h->ae_algo->ops->set_vlan_filter) |
---|
1379 | 1789 | ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); |
---|
1380 | | - |
---|
1381 | | - if (!ret) |
---|
1382 | | - set_bit(vid, priv->active_vlans); |
---|
1383 | 1790 | |
---|
1384 | 1791 | return ret; |
---|
1385 | 1792 | } |
---|
.. | .. |
---|
1388 | 1795 | __be16 proto, u16 vid) |
---|
1389 | 1796 | { |
---|
1390 | 1797 | struct hnae3_handle *h = hns3_get_handle(netdev); |
---|
1391 | | - struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
1392 | 1798 | int ret = -EIO; |
---|
1393 | 1799 | |
---|
1394 | 1800 | if (h->ae_algo->ops->set_vlan_filter) |
---|
1395 | 1801 | ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); |
---|
1396 | 1802 | |
---|
1397 | | - if (!ret) |
---|
1398 | | - clear_bit(vid, priv->active_vlans); |
---|
1399 | | - |
---|
1400 | 1803 | return ret; |
---|
1401 | | -} |
---|
1402 | | - |
---|
1403 | | -static void hns3_restore_vlan(struct net_device *netdev) |
---|
1404 | | -{ |
---|
1405 | | - struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
1406 | | - u16 vid; |
---|
1407 | | - int ret; |
---|
1408 | | - |
---|
1409 | | - for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { |
---|
1410 | | - ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); |
---|
1411 | | - if (ret) |
---|
1412 | | - netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n", |
---|
1413 | | - vid, ret); |
---|
1414 | | - } |
---|
1415 | 1804 | } |
---|
1416 | 1805 | |
---|
1417 | 1806 | static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, |
---|
.. | .. |
---|
1420 | 1809 | struct hnae3_handle *h = hns3_get_handle(netdev); |
---|
1421 | 1810 | int ret = -EIO; |
---|
1422 | 1811 | |
---|
| 1812 | + netif_dbg(h, drv, netdev, |
---|
| 1813 | + "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n", |
---|
| 1814 | + vf, vlan, qos, ntohs(vlan_proto)); |
---|
| 1815 | + |
---|
1423 | 1816 | if (h->ae_algo->ops->set_vf_vlan_filter) |
---|
1424 | 1817 | ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, |
---|
1425 | | - qos, vlan_proto); |
---|
| 1818 | + qos, vlan_proto); |
---|
1426 | 1819 | |
---|
1427 | 1820 | return ret; |
---|
| 1821 | +} |
---|
| 1822 | + |
---|
| 1823 | +static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) |
---|
| 1824 | +{ |
---|
| 1825 | + struct hnae3_handle *handle = hns3_get_handle(netdev); |
---|
| 1826 | + |
---|
| 1827 | + if (hns3_nic_resetting(netdev)) |
---|
| 1828 | + return -EBUSY; |
---|
| 1829 | + |
---|
| 1830 | + if (!handle->ae_algo->ops->set_vf_spoofchk) |
---|
| 1831 | + return -EOPNOTSUPP; |
---|
| 1832 | + |
---|
| 1833 | + return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); |
---|
| 1834 | +} |
---|
| 1835 | + |
---|
| 1836 | +static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable) |
---|
| 1837 | +{ |
---|
| 1838 | + struct hnae3_handle *handle = hns3_get_handle(netdev); |
---|
| 1839 | + |
---|
| 1840 | + if (!handle->ae_algo->ops->set_vf_trust) |
---|
| 1841 | + return -EOPNOTSUPP; |
---|
| 1842 | + |
---|
| 1843 | + return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); |
---|
1428 | 1844 | } |
---|
1429 | 1845 | |
---|
1430 | 1846 | static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) |
---|
1431 | 1847 | { |
---|
1432 | 1848 | struct hnae3_handle *h = hns3_get_handle(netdev); |
---|
1433 | | - bool if_running = netif_running(netdev); |
---|
1434 | 1849 | int ret; |
---|
| 1850 | + |
---|
| 1851 | + if (hns3_nic_resetting(netdev)) |
---|
| 1852 | + return -EBUSY; |
---|
1435 | 1853 | |
---|
1436 | 1854 | if (!h->ae_algo->ops->set_mtu) |
---|
1437 | 1855 | return -EOPNOTSUPP; |
---|
1438 | 1856 | |
---|
1439 | | - /* if this was called with netdev up then bring netdevice down */ |
---|
1440 | | - if (if_running) { |
---|
1441 | | - (void)hns3_nic_net_stop(netdev); |
---|
1442 | | - msleep(100); |
---|
1443 | | - } |
---|
| 1857 | + netif_dbg(h, drv, netdev, |
---|
| 1858 | + "change mtu from %u to %d\n", netdev->mtu, new_mtu); |
---|
1444 | 1859 | |
---|
1445 | 1860 | ret = h->ae_algo->ops->set_mtu(h, new_mtu); |
---|
1446 | 1861 | if (ret) |
---|
.. | .. |
---|
1449 | 1864 | else |
---|
1450 | 1865 | netdev->mtu = new_mtu; |
---|
1451 | 1866 | |
---|
1452 | | - /* if the netdev was running earlier, bring it up again */ |
---|
1453 | | - if (if_running && hns3_nic_net_open(netdev)) |
---|
1454 | | - ret = -EINVAL; |
---|
1455 | | - |
---|
1456 | 1867 | return ret; |
---|
1457 | 1868 | } |
---|
1458 | 1869 | |
---|
1459 | 1870 | static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) |
---|
1460 | 1871 | { |
---|
1461 | 1872 | struct hns3_nic_priv *priv = netdev_priv(ndev); |
---|
1462 | | - struct hns3_enet_ring *tx_ring = NULL; |
---|
| 1873 | + struct hnae3_handle *h = hns3_get_handle(ndev); |
---|
| 1874 | + struct hns3_enet_ring *tx_ring; |
---|
| 1875 | + struct napi_struct *napi; |
---|
1463 | 1876 | int timeout_queue = 0; |
---|
1464 | 1877 | int hw_head, hw_tail; |
---|
| 1878 | + int fbd_num, fbd_oft; |
---|
| 1879 | + int ebd_num, ebd_oft; |
---|
| 1880 | + int bd_num, bd_err; |
---|
| 1881 | + int ring_en, tc; |
---|
1465 | 1882 | int i; |
---|
1466 | 1883 | |
---|
1467 | 1884 | /* Find the stopped queue the same way the stack does */ |
---|
.. | .. |
---|
1489 | 1906 | return false; |
---|
1490 | 1907 | } |
---|
1491 | 1908 | |
---|
1492 | | - tx_ring = priv->ring_data[timeout_queue].ring; |
---|
| 1909 | + priv->tx_timeout_count++; |
---|
| 1910 | + |
---|
| 1911 | + tx_ring = &priv->ring[timeout_queue]; |
---|
| 1912 | + napi = &tx_ring->tqp_vector->napi; |
---|
| 1913 | + |
---|
| 1914 | + netdev_info(ndev, |
---|
| 1915 | + "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", |
---|
| 1916 | + priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, |
---|
| 1917 | + tx_ring->next_to_clean, napi->state); |
---|
| 1918 | + |
---|
| 1919 | + netdev_info(ndev, |
---|
| 1920 | + "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n", |
---|
| 1921 | + tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, |
---|
| 1922 | + tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); |
---|
| 1923 | + |
---|
| 1924 | + netdev_info(ndev, |
---|
| 1925 | + "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", |
---|
| 1926 | + tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, |
---|
| 1927 | + tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); |
---|
| 1928 | + |
---|
| 1929 | + /* When the MAC receives many pause frames continuously, it is unable
---|
| 1930 | + * to send packets, which may cause a tx timeout
---|
| 1931 | + */ |
---|
| 1932 | + if (h->ae_algo->ops->get_mac_stats) { |
---|
| 1933 | + struct hns3_mac_stats mac_stats; |
---|
| 1934 | + |
---|
| 1935 | + h->ae_algo->ops->get_mac_stats(h, &mac_stats); |
---|
| 1936 | + netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", |
---|
| 1937 | + mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); |
---|
| 1938 | + } |
---|
1493 | 1939 | |
---|
1494 | 1940 | hw_head = readl_relaxed(tx_ring->tqp->io_base + |
---|
1495 | 1941 | HNS3_RING_TX_RING_HEAD_REG); |
---|
1496 | 1942 | hw_tail = readl_relaxed(tx_ring->tqp->io_base + |
---|
1497 | 1943 | HNS3_RING_TX_RING_TAIL_REG); |
---|
| 1944 | + fbd_num = readl_relaxed(tx_ring->tqp->io_base + |
---|
| 1945 | + HNS3_RING_TX_RING_FBDNUM_REG); |
---|
| 1946 | + fbd_oft = readl_relaxed(tx_ring->tqp->io_base + |
---|
| 1947 | + HNS3_RING_TX_RING_OFFSET_REG); |
---|
| 1948 | + ebd_num = readl_relaxed(tx_ring->tqp->io_base + |
---|
| 1949 | + HNS3_RING_TX_RING_EBDNUM_REG); |
---|
| 1950 | + ebd_oft = readl_relaxed(tx_ring->tqp->io_base + |
---|
| 1951 | + HNS3_RING_TX_RING_EBD_OFFSET_REG); |
---|
| 1952 | + bd_num = readl_relaxed(tx_ring->tqp->io_base + |
---|
| 1953 | + HNS3_RING_TX_RING_BD_NUM_REG); |
---|
| 1954 | + bd_err = readl_relaxed(tx_ring->tqp->io_base + |
---|
| 1955 | + HNS3_RING_TX_RING_BD_ERR_REG); |
---|
| 1956 | + ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); |
---|
| 1957 | + tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); |
---|
| 1958 | + |
---|
1498 | 1959 | netdev_info(ndev, |
---|
1499 | | - "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n", |
---|
1500 | | - priv->tx_timeout_count, |
---|
1501 | | - timeout_queue, |
---|
1502 | | - tx_ring->next_to_use, |
---|
1503 | | - tx_ring->next_to_clean, |
---|
1504 | | - hw_head, |
---|
1505 | | - hw_tail, |
---|
| 1960 | + "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", |
---|
| 1961 | + bd_num, hw_head, hw_tail, bd_err, |
---|
1506 | 1962 | readl(tx_ring->tqp_vector->mask_addr)); |
---|
| 1963 | + netdev_info(ndev, |
---|
| 1964 | + "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", |
---|
| 1965 | + ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); |
---|
1507 | 1966 | |
---|
1508 | 1967 | return true; |
---|
1509 | 1968 | } |
---|
1510 | 1969 | |
---|
1511 | | -static void hns3_nic_net_timeout(struct net_device *ndev) |
---|
| 1970 | +static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue) |
---|
1512 | 1971 | { |
---|
1513 | 1972 | struct hns3_nic_priv *priv = netdev_priv(ndev); |
---|
1514 | 1973 | struct hnae3_handle *h = priv->ae_handle; |
---|
.. | .. |
---|
1516 | 1975 | if (!hns3_get_tx_timeo_queue_info(ndev)) |
---|
1517 | 1976 | return; |
---|
1518 | 1977 | |
---|
1519 | | - priv->tx_timeout_count++; |
---|
1520 | | - |
---|
1521 | | - if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo))) |
---|
1522 | | - return; |
---|
1523 | | - |
---|
1524 | | - /* request the reset */ |
---|
| 1978 | + /* request the reset, and let hclge determine
---|
| 1979 | + * which reset level should be done
---|
| 1980 | + */ |
---|
1525 | 1981 | if (h->ae_algo->ops->reset_event) |
---|
1526 | | - h->ae_algo->ops->reset_event(h); |
---|
| 1982 | + h->ae_algo->ops->reset_event(h->pdev, h); |
---|
| 1983 | +} |
---|
| 1984 | + |
---|
| 1985 | +#ifdef CONFIG_RFS_ACCEL |
---|
| 1986 | +static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, |
---|
| 1987 | + u16 rxq_index, u32 flow_id) |
---|
| 1988 | +{ |
---|
| 1989 | + struct hnae3_handle *h = hns3_get_handle(dev); |
---|
| 1990 | + struct flow_keys fkeys; |
---|
| 1991 | + |
---|
| 1992 | + if (!h->ae_algo->ops->add_arfs_entry) |
---|
| 1993 | + return -EOPNOTSUPP; |
---|
| 1994 | + |
---|
| 1995 | + if (skb->encapsulation) |
---|
| 1996 | + return -EPROTONOSUPPORT; |
---|
| 1997 | + |
---|
| 1998 | + if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0)) |
---|
| 1999 | + return -EPROTONOSUPPORT; |
---|
| 2000 | + |
---|
| 2001 | + if ((fkeys.basic.n_proto != htons(ETH_P_IP) && |
---|
| 2002 | + fkeys.basic.n_proto != htons(ETH_P_IPV6)) || |
---|
| 2003 | + (fkeys.basic.ip_proto != IPPROTO_TCP && |
---|
| 2004 | + fkeys.basic.ip_proto != IPPROTO_UDP)) |
---|
| 2005 | + return -EPROTONOSUPPORT; |
---|
| 2006 | + |
---|
| 2007 | + return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); |
---|
| 2008 | +} |
---|
| 2009 | +#endif |
---|
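hns3_rx_flow_steer() only programs aRFS entries for plain, non-encapsulated TCP or UDP over IPv4/IPv6; everything else returns -EPROTONOSUPPORT so the core falls back to software RFS. The same acceptance test as a small truth-table sketch, with invented constants standing in for the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    enum { P_IP, P_IPV6, P_OTHER };
    enum { L4_TCP, L4_UDP, L4_OTHER };

    static bool arfs_supported(int l3, int l4, bool encap)
    {
        if (encap)
            return false;   /* tunnelled flows are rejected */
        if (l3 != P_IP && l3 != P_IPV6)
            return false;   /* IPv4/IPv6 only */
        return l4 == L4_TCP || l4 == L4_UDP;
    }

    int main(void)
    {
        printf("%d %d\n", arfs_supported(P_IP, L4_TCP, false),
               arfs_supported(P_IP, L4_TCP, true));   /* 1 0 */
        return 0;
    }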
| 2010 | + |
---|
| 2011 | +static int hns3_nic_get_vf_config(struct net_device *ndev, int vf, |
---|
| 2012 | + struct ifla_vf_info *ivf) |
---|
| 2013 | +{ |
---|
| 2014 | + struct hnae3_handle *h = hns3_get_handle(ndev); |
---|
| 2015 | + |
---|
| 2016 | + if (!h->ae_algo->ops->get_vf_config) |
---|
| 2017 | + return -EOPNOTSUPP; |
---|
| 2018 | + |
---|
| 2019 | + return h->ae_algo->ops->get_vf_config(h, vf, ivf); |
---|
| 2020 | +} |
---|
| 2021 | + |
---|
| 2022 | +static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf, |
---|
| 2023 | + int link_state) |
---|
| 2024 | +{ |
---|
| 2025 | + struct hnae3_handle *h = hns3_get_handle(ndev); |
---|
| 2026 | + |
---|
| 2027 | + if (!h->ae_algo->ops->set_vf_link_state) |
---|
| 2028 | + return -EOPNOTSUPP; |
---|
| 2029 | + |
---|
| 2030 | + return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); |
---|
| 2031 | +} |
---|
| 2032 | + |
---|
| 2033 | +static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, |
---|
| 2034 | + int min_tx_rate, int max_tx_rate) |
---|
| 2035 | +{ |
---|
| 2036 | + struct hnae3_handle *h = hns3_get_handle(ndev); |
---|
| 2037 | + |
---|
| 2038 | + if (!h->ae_algo->ops->set_vf_rate) |
---|
| 2039 | + return -EOPNOTSUPP; |
---|
| 2040 | + |
---|
| 2041 | + return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, |
---|
| 2042 | + false); |
---|
| 2043 | +} |
---|
| 2044 | + |
---|
| 2045 | +static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) |
---|
| 2046 | +{ |
---|
| 2047 | + struct hnae3_handle *h = hns3_get_handle(netdev); |
---|
| 2048 | + |
---|
| 2049 | + if (!h->ae_algo->ops->set_vf_mac) |
---|
| 2050 | + return -EOPNOTSUPP; |
---|
| 2051 | + |
---|
| 2052 | + if (is_multicast_ether_addr(mac)) { |
---|
| 2053 | + netdev_err(netdev, |
---|
| 2054 | + "Invalid MAC:%pM specified. Could not set MAC\n", |
---|
| 2055 | + mac); |
---|
| 2056 | + return -EINVAL; |
---|
| 2057 | + } |
---|
| 2058 | + |
---|
| 2059 | + return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); |
---|
1527 | 2060 | } |
---|
1528 | 2061 | |
---|
1529 | 2062 | static const struct net_device_ops hns3_nic_netdev_ops = { |
---|
.. | .. |
---|
1532 | 2065 | .ndo_start_xmit = hns3_nic_net_xmit, |
---|
1533 | 2066 | .ndo_tx_timeout = hns3_nic_net_timeout, |
---|
1534 | 2067 | .ndo_set_mac_address = hns3_nic_net_set_mac_address, |
---|
| 2068 | + .ndo_do_ioctl = hns3_nic_do_ioctl, |
---|
1535 | 2069 | .ndo_change_mtu = hns3_nic_change_mtu, |
---|
1536 | 2070 | .ndo_set_features = hns3_nic_set_features, |
---|
| 2071 | + .ndo_features_check = hns3_features_check, |
---|
1537 | 2072 | .ndo_get_stats64 = hns3_nic_get_stats64, |
---|
1538 | 2073 | .ndo_setup_tc = hns3_nic_setup_tc, |
---|
1539 | 2074 | .ndo_set_rx_mode = hns3_nic_set_rx_mode, |
---|
1540 | 2075 | .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, |
---|
1541 | 2076 | .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, |
---|
1542 | 2077 | .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, |
---|
| 2078 | + .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk, |
---|
| 2079 | + .ndo_set_vf_trust = hns3_set_vf_trust, |
---|
| 2080 | +#ifdef CONFIG_RFS_ACCEL |
---|
| 2081 | + .ndo_rx_flow_steer = hns3_rx_flow_steer, |
---|
| 2082 | +#endif |
---|
| 2083 | + .ndo_get_vf_config = hns3_nic_get_vf_config, |
---|
| 2084 | + .ndo_set_vf_link_state = hns3_nic_set_vf_link_state, |
---|
| 2085 | + .ndo_set_vf_rate = hns3_nic_set_vf_rate, |
---|
| 2086 | + .ndo_set_vf_mac = hns3_nic_set_vf_mac, |
---|
1543 | 2087 | }; |
---|
1544 | 2088 | |
---|
1545 | | -static bool hns3_is_phys_func(struct pci_dev *pdev) |
---|
| 2089 | +bool hns3_is_phys_func(struct pci_dev *pdev) |
---|
1546 | 2090 | { |
---|
1547 | 2091 | u32 dev_id = pdev->device; |
---|
1548 | 2092 | |
---|
.. | .. |
---|
1554 | 2098 | case HNAE3_DEV_ID_50GE_RDMA: |
---|
1555 | 2099 | case HNAE3_DEV_ID_50GE_RDMA_MACSEC: |
---|
1556 | 2100 | case HNAE3_DEV_ID_100G_RDMA_MACSEC: |
---|
| 2101 | + case HNAE3_DEV_ID_200G_RDMA: |
---|
1557 | 2102 | return true; |
---|
1558 | | - case HNAE3_DEV_ID_100G_VF: |
---|
1559 | | - case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF: |
---|
| 2103 | + case HNAE3_DEV_ID_VF: |
---|
| 2104 | + case HNAE3_DEV_ID_RDMA_DCB_PFC_VF: |
---|
1560 | 2105 | return false; |
---|
1561 | 2106 | default: |
---|
1562 | | - dev_warn(&pdev->dev, "un-recognized pci device-id %d", |
---|
| 2107 | + dev_warn(&pdev->dev, "un-recognized pci device-id %u", |
---|
1563 | 2108 | dev_id); |
---|
1564 | 2109 | } |
---|
1565 | 2110 | |
---|
.. | .. |
---|
1596 | 2141 | struct hnae3_ae_dev *ae_dev; |
---|
1597 | 2142 | int ret; |
---|
1598 | 2143 | |
---|
1599 | | - ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), |
---|
1600 | | - GFP_KERNEL); |
---|
1601 | | - if (!ae_dev) { |
---|
1602 | | - ret = -ENOMEM; |
---|
1603 | | - return ret; |
---|
1604 | | - } |
---|
| 2144 | + ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); |
---|
| 2145 | + if (!ae_dev) |
---|
| 2146 | + return -ENOMEM; |
---|
1605 | 2147 | |
---|
1606 | 2148 | ae_dev->pdev = pdev; |
---|
1607 | 2149 | ae_dev->flag = ent->driver_data; |
---|
1608 | | - ae_dev->dev_type = HNAE3_DEV_KNIC; |
---|
1609 | 2150 | pci_set_drvdata(pdev, ae_dev); |
---|
1610 | 2151 | |
---|
1611 | 2152 | ret = hnae3_register_ae_dev(ae_dev); |
---|
1612 | | - if (ret) { |
---|
1613 | | - devm_kfree(&pdev->dev, ae_dev); |
---|
| 2153 | + if (ret) |
---|
1614 | 2154 | pci_set_drvdata(pdev, NULL); |
---|
1615 | | - } |
---|
1616 | 2155 | |
---|
1617 | 2156 | return ret; |
---|
1618 | 2157 | } |
---|
.. | .. |
---|
1664 | 2203 | return 0; |
---|
1665 | 2204 | } |
---|
1666 | 2205 | |
---|
| 2206 | +static void hns3_shutdown(struct pci_dev *pdev) |
---|
| 2207 | +{ |
---|
| 2208 | + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
---|
| 2209 | + |
---|
| 2210 | + hnae3_unregister_ae_dev(ae_dev); |
---|
| 2211 | + pci_set_drvdata(pdev, NULL); |
---|
| 2212 | + |
---|
| 2213 | + if (system_state == SYSTEM_POWER_OFF) |
---|
| 2214 | + pci_set_power_state(pdev, PCI_D3hot); |
---|
| 2215 | +} |
---|
| 2216 | + |
---|
| 2217 | +static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, |
---|
| 2218 | + pci_channel_state_t state) |
---|
| 2219 | +{ |
---|
| 2220 | + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
---|
| 2221 | + pci_ers_result_t ret; |
---|
| 2222 | + |
---|
| 2223 | + dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state); |
---|
| 2224 | + |
---|
| 2225 | + if (state == pci_channel_io_perm_failure) |
---|
| 2226 | + return PCI_ERS_RESULT_DISCONNECT; |
---|
| 2227 | + |
---|
| 2228 | + if (!ae_dev || !ae_dev->ops) { |
---|
| 2229 | + dev_err(&pdev->dev, |
---|
| 2230 | + "Can't recover - error happened before device initialized\n"); |
---|
| 2231 | + return PCI_ERS_RESULT_NONE; |
---|
| 2232 | + } |
---|
| 2233 | + |
---|
| 2234 | + if (ae_dev->ops->handle_hw_ras_error) |
---|
| 2235 | + ret = ae_dev->ops->handle_hw_ras_error(ae_dev); |
---|
| 2236 | + else |
---|
| 2237 | + return PCI_ERS_RESULT_NONE; |
---|
| 2238 | + |
---|
| 2239 | + return ret; |
---|
| 2240 | +} |
---|
| 2241 | + |
---|
| 2242 | +static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) |
---|
| 2243 | +{ |
---|
| 2244 | + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
---|
| 2245 | + const struct hnae3_ae_ops *ops; |
---|
| 2246 | + enum hnae3_reset_type reset_type; |
---|
| 2247 | + struct device *dev = &pdev->dev; |
---|
| 2248 | + |
---|
| 2249 | + if (!ae_dev || !ae_dev->ops) |
---|
| 2250 | + return PCI_ERS_RESULT_NONE; |
---|
| 2251 | + |
---|
| 2252 | + ops = ae_dev->ops; |
---|
| 2253 | + /* request the reset */ |
---|
| 2254 | + if (ops->reset_event && ops->get_reset_level && |
---|
| 2255 | + ops->set_default_reset_request) { |
---|
| 2256 | + if (ae_dev->hw_err_reset_req) { |
---|
| 2257 | + reset_type = ops->get_reset_level(ae_dev, |
---|
| 2258 | + &ae_dev->hw_err_reset_req); |
---|
| 2259 | + ops->set_default_reset_request(ae_dev, reset_type); |
---|
| 2260 | + dev_info(dev, "requesting reset due to PCI error\n"); |
---|
| 2261 | + ops->reset_event(pdev, NULL); |
---|
| 2262 | + } |
---|
| 2263 | + |
---|
| 2264 | + return PCI_ERS_RESULT_RECOVERED; |
---|
| 2265 | + } |
---|
| 2266 | + |
---|
| 2267 | + return PCI_ERS_RESULT_DISCONNECT; |
---|
| 2268 | +} |
---|
| 2269 | + |
---|
| 2270 | +static void hns3_reset_prepare(struct pci_dev *pdev) |
---|
| 2271 | +{ |
---|
| 2272 | + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
---|
| 2273 | + |
---|
| 2274 | + dev_info(&pdev->dev, "FLR prepare\n"); |
---|
| 2275 | + if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) |
---|
| 2276 | + ae_dev->ops->flr_prepare(ae_dev); |
---|
| 2277 | +} |
---|
| 2278 | + |
---|
| 2279 | +static void hns3_reset_done(struct pci_dev *pdev) |
---|
| 2280 | +{ |
---|
| 2281 | + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
---|
| 2282 | + |
---|
| 2283 | + dev_info(&pdev->dev, "FLR done\n"); |
---|
| 2284 | + if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done) |
---|
| 2285 | + ae_dev->ops->flr_done(ae_dev); |
---|
| 2286 | +} |
---|
| 2287 | + |
---|
| 2288 | +static const struct pci_error_handlers hns3_err_handler = { |
---|
| 2289 | + .error_detected = hns3_error_detected, |
---|
| 2290 | + .slot_reset = hns3_slot_reset, |
---|
| 2291 | + .reset_prepare = hns3_reset_prepare, |
---|
| 2292 | + .reset_done = hns3_reset_done, |
---|
| 2293 | +}; |
---|
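Reading the two handlers above together: the PCI core calls error_detected() first and, unless it answers disconnect, follows up with slot_reset(), where the driver schedules its own reset and reports recovered. A toy model of that sequencing; the enum only loosely mirrors pci_ers_result_t, and both functions are simplified stand-ins:

    #include <stdio.h>

    enum ers { ERS_NONE, ERS_NEED_RESET, ERS_RECOVERED, ERS_DISCONNECT };

    static enum ers error_detected(int perm_failure)
    {
        if (perm_failure)
            return ERS_DISCONNECT;   /* pci_channel_io_perm_failure */
        return ERS_NEED_RESET;       /* ask the core for slot_reset() */
    }

    static enum ers slot_reset(void)
    {
        /* the driver schedules its own reset and reports success */
        return ERS_RECOVERED;
    }

    int main(void)
    {
        enum ers r = error_detected(0);

        if (r == ERS_NEED_RESET)
            r = slot_reset();
        printf("recovery %s\n", r == ERS_RECOVERED ? "succeeded" : "failed");
        return 0;
    }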
| 2294 | + |
---|
1667 | 2295 | static struct pci_driver hns3_driver = { |
---|
1668 | 2296 | .name = hns3_driver_name, |
---|
1669 | 2297 | .id_table = hns3_pci_tbl, |
---|
1670 | 2298 | .probe = hns3_probe, |
---|
1671 | 2299 | .remove = hns3_remove, |
---|
| 2300 | + .shutdown = hns3_shutdown, |
---|
1672 | 2301 | .sriov_configure = hns3_pci_sriov_configure, |
---|
| 2302 | + .err_handler = &hns3_err_handler, |
---|
1673 | 2303 | }; |
---|
1674 | 2304 | |
---|
1675 | 2305 | /* set default feature to hns3 */ |
---|
.. | .. |
---|
1677 | 2307 | { |
---|
1678 | 2308 | struct hnae3_handle *h = hns3_get_handle(netdev); |
---|
1679 | 2309 | struct pci_dev *pdev = h->pdev; |
---|
| 2310 | + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
---|
1680 | 2311 | |
---|
1681 | 2312 | netdev->priv_flags |= IFF_UNICAST_FLT; |
---|
1682 | 2313 | |
---|
.. | .. |
---|
1684 | 2315 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | |
---|
1685 | 2316 | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | |
---|
1686 | 2317 | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | |
---|
1687 | | - NETIF_F_GSO_UDP_TUNNEL_CSUM; |
---|
1688 | | - |
---|
1689 | | - netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; |
---|
| 2318 | + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | |
---|
| 2319 | + NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST; |
---|
1690 | 2320 | |
---|
1691 | 2321 | netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; |
---|
1692 | 2322 | |
---|
.. | .. |
---|
1696 | 2326 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | |
---|
1697 | 2327 | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | |
---|
1698 | 2328 | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | |
---|
1699 | | - NETIF_F_GSO_UDP_TUNNEL_CSUM; |
---|
| 2329 | + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | |
---|
| 2330 | + NETIF_F_FRAGLIST; |
---|
1700 | 2331 | |
---|
1701 | 2332 | netdev->vlan_features |= |
---|
1702 | 2333 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | |
---|
1703 | 2334 | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | |
---|
1704 | 2335 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | |
---|
1705 | 2336 | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | |
---|
1706 | | - NETIF_F_GSO_UDP_TUNNEL_CSUM; |
---|
| 2337 | + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | |
---|
| 2338 | + NETIF_F_FRAGLIST; |
---|
1707 | 2339 | |
---|
1708 | 2340 | netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
---|
1709 | 2341 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | |
---|
1710 | 2342 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | |
---|
1711 | 2343 | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | |
---|
1712 | 2344 | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | |
---|
1713 | | - NETIF_F_GSO_UDP_TUNNEL_CSUM; |
---|
| 2345 | + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | |
---|
| 2346 | + NETIF_F_FRAGLIST; |
---|
1714 | 2347 | |
---|
1715 | | - if (pdev->revision != 0x20) |
---|
1716 | | - netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
---|
| 2348 | + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { |
---|
| 2349 | + netdev->hw_features |= NETIF_F_GRO_HW; |
---|
| 2350 | + netdev->features |= NETIF_F_GRO_HW; |
---|
| 2351 | + |
---|
| 2352 | + if (!(h->flags & HNAE3_SUPPORT_VF)) { |
---|
| 2353 | + netdev->hw_features |= NETIF_F_NTUPLE; |
---|
| 2354 | + netdev->features |= NETIF_F_NTUPLE; |
---|
| 2355 | + } |
---|
| 2356 | + } |
---|
| 2357 | + |
---|
| 2358 | + if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) { |
---|
| 2359 | + netdev->hw_features |= NETIF_F_GSO_UDP_L4; |
---|
| 2360 | + netdev->features |= NETIF_F_GSO_UDP_L4; |
---|
| 2361 | + netdev->vlan_features |= NETIF_F_GSO_UDP_L4; |
---|
| 2362 | + netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4; |
---|
| 2363 | + } |
---|
1717 | 2364 | } |
---|
1718 | 2365 | |
---|
1719 | 2366 | static int hns3_alloc_buffer(struct hns3_enet_ring *ring, |
---|
1720 | 2367 | struct hns3_desc_cb *cb) |
---|
1721 | 2368 | { |
---|
1722 | | - unsigned int order = hnae3_page_order(ring); |
---|
| 2369 | + unsigned int order = hns3_page_order(ring); |
---|
1723 | 2370 | struct page *p; |
---|
1724 | 2371 | |
---|
1725 | 2372 | p = dev_alloc_pages(order); |
---|
.. | .. |
---|
1730 | 2377 | cb->page_offset = 0; |
---|
1731 | 2378 | cb->reuse_flag = 0; |
---|
1732 | 2379 | cb->buf = page_address(p); |
---|
1733 | | - cb->length = hnae3_page_size(ring); |
---|
| 2380 | + cb->length = hns3_page_size(ring); |
---|
1734 | 2381 | cb->type = DESC_TYPE_PAGE; |
---|
| 2382 | + page_ref_add(p, USHRT_MAX - 1); |
---|
| 2383 | + cb->pagecnt_bias = USHRT_MAX; |
---|
1735 | 2384 | |
---|
1736 | 2385 | return 0; |
---|
1737 | 2386 | } |
---|
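The page_ref_add(p, USHRT_MAX - 1) / pagecnt_bias pair above is the page-reuse bias trick: grab a big batch of page references up front, hand one out each time the buffer is recycled to the stack, and drain whatever is left in one go at teardown (__page_frag_cache_drain() in hns3_free_buffer()). A counter-only model of the bookkeeping, with invented names:

    #include <assert.h>

    struct page_model {
        int refcount;
        int bias;   /* references the driver still owns */
    };

    static void buf_alloc(struct page_model *p)
    {
        p->refcount = 1;             /* from dev_alloc_pages() */
        p->refcount += 65535 - 1;    /* page_ref_add(p, USHRT_MAX - 1) */
        p->bias = 65535;             /* cb->pagecnt_bias = USHRT_MAX */
    }

    static void buf_give_to_stack(struct page_model *p)
    {
        p->bias--;   /* one of our references now belongs to the skb */
    }

    static void buf_free(struct page_model *p)
    {
        p->refcount -= p->bias;   /* __page_frag_cache_drain() */
        p->bias = 0;
    }

    int main(void)
    {
        struct page_model p;

        buf_alloc(&p);
        buf_give_to_stack(&p);    /* one buffer handed off as an skb frag */
        buf_free(&p);
        assert(p.refcount == 1);  /* only the skb's reference remains */
        return 0;
    }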
1738 | 2387 | |
---|
1739 | 2388 | static void hns3_free_buffer(struct hns3_enet_ring *ring, |
---|
1740 | | - struct hns3_desc_cb *cb) |
---|
| 2389 | + struct hns3_desc_cb *cb, int budget) |
---|
1741 | 2390 | { |
---|
1742 | 2391 | if (cb->type == DESC_TYPE_SKB) |
---|
1743 | | - dev_kfree_skb_any((struct sk_buff *)cb->priv); |
---|
1744 | | - else if (!HNAE3_IS_TX_RING(ring)) |
---|
1745 | | - put_page((struct page *)cb->priv); |
---|
| 2392 | + napi_consume_skb(cb->priv, budget); |
---|
| 2393 | + else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias) |
---|
| 2394 | + __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); |
---|
1746 | 2395 | memset(cb, 0, sizeof(*cb)); |
---|
1747 | 2396 | } |
---|
1748 | 2397 | |
---|
.. | .. |
---|
1751 | 2400 | cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, |
---|
1752 | 2401 | cb->length, ring_to_dma_dir(ring)); |
---|
1753 | 2402 | |
---|
1754 | | - if (dma_mapping_error(ring_to_dev(ring), cb->dma)) |
---|
| 2403 | + if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) |
---|
1755 | 2404 | return -EIO; |
---|
1756 | 2405 | |
---|
1757 | 2406 | return 0; |
---|
.. | .. |
---|
1760 | 2409 | static void hns3_unmap_buffer(struct hns3_enet_ring *ring, |
---|
1761 | 2410 | struct hns3_desc_cb *cb) |
---|
1762 | 2411 | { |
---|
1763 | | - if (cb->type == DESC_TYPE_SKB) |
---|
| 2412 | + if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB) |
---|
1764 | 2413 | dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, |
---|
1765 | 2414 | ring_to_dma_dir(ring)); |
---|
1766 | | - else |
---|
| 2415 | + else if (cb->length) |
---|
1767 | 2416 | dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, |
---|
1768 | 2417 | ring_to_dma_dir(ring)); |
---|
1769 | 2418 | } |
---|
.. | .. |
---|
1772 | 2421 | { |
---|
1773 | 2422 | hns3_unmap_buffer(ring, &ring->desc_cb[i]); |
---|
1774 | 2423 | ring->desc[i].addr = 0; |
---|
| 2424 | + ring->desc_cb[i].refill = 0; |
---|
1775 | 2425 | } |
---|
1776 | 2426 | |
---|
1777 | | -static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) |
---|
| 2427 | +static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i, |
---|
| 2428 | + int budget) |
---|
1778 | 2429 | { |
---|
1779 | 2430 | struct hns3_desc_cb *cb = &ring->desc_cb[i]; |
---|
1780 | 2431 | |
---|
.. | .. |
---|
1782 | 2433 | return; |
---|
1783 | 2434 | |
---|
1784 | 2435 | hns3_buffer_detach(ring, i); |
---|
1785 | | - hns3_free_buffer(ring, cb); |
---|
| 2436 | + hns3_free_buffer(ring, cb, budget); |
---|
1786 | 2437 | } |
---|
1787 | 2438 | |
---|
1788 | 2439 | static void hns3_free_buffers(struct hns3_enet_ring *ring) |
---|
.. | .. |
---|
1790 | 2441 | int i; |
---|
1791 | 2442 | |
---|
1792 | 2443 | for (i = 0; i < ring->desc_num; i++) |
---|
1793 | | - hns3_free_buffer_detach(ring, i); |
---|
| 2444 | + hns3_free_buffer_detach(ring, i, 0); |
---|
1794 | 2445 | } |
---|
1795 | 2446 | |
---|
1796 | 2447 | /* free desc along with its attached buffer */ |
---|
.. | .. |
---|
1811 | 2462 | { |
---|
1812 | 2463 | int size = ring->desc_num * sizeof(ring->desc[0]); |
---|
1813 | 2464 | |
---|
1814 | | - ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, |
---|
1815 | | - &ring->desc_dma_addr, |
---|
1816 | | - GFP_KERNEL); |
---|
| 2465 | + ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, |
---|
| 2466 | + &ring->desc_dma_addr, GFP_KERNEL); |
---|
1817 | 2467 | if (!ring->desc) |
---|
1818 | 2468 | return -ENOMEM; |
---|
1819 | 2469 | |
---|
1820 | 2470 | return 0; |
---|
1821 | 2471 | } |
---|
1822 | 2472 | |
---|
1823 | | -static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, |
---|
| 2473 | +static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, |
---|
1824 | 2474 | struct hns3_desc_cb *cb) |
---|
1825 | 2475 | { |
---|
1826 | 2476 | int ret; |
---|
.. | .. |
---|
1836 | 2486 | return 0; |
---|
1837 | 2487 | |
---|
1838 | 2488 | out_with_buf: |
---|
1839 | | - hns3_free_buffer(ring, cb); |
---|
| 2489 | + hns3_free_buffer(ring, cb, 0); |
---|
1840 | 2490 | out: |
---|
1841 | 2491 | return ret; |
---|
1842 | 2492 | } |
---|
1843 | 2493 | |
---|
1844 | | -static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i) |
---|
| 2494 | +static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) |
---|
1845 | 2495 | { |
---|
1846 | | - int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]); |
---|
| 2496 | + int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); |
---|
1847 | 2497 | |
---|
1848 | 2498 | if (ret) |
---|
1849 | 2499 | return ret; |
---|
1850 | 2500 | |
---|
1851 | 2501 | ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); |
---|
| 2502 | + ring->desc_cb[i].refill = 1; |
---|
1852 | 2503 | |
---|
1853 | 2504 | return 0; |
---|
1854 | 2505 | } |
---|
.. | .. |
---|
1859 | 2510 | int i, j, ret; |
---|
1860 | 2511 | |
---|
1861 | 2512 | for (i = 0; i < ring->desc_num; i++) { |
---|
1862 | | - ret = hns3_alloc_buffer_attach(ring, i); |
---|
| 2513 | + ret = hns3_alloc_and_attach_buffer(ring, i); |
---|
1863 | 2514 | if (ret) |
---|
1864 | 2515 | goto out_buffer_fail; |
---|
1865 | 2516 | } |
---|
.. | .. |
---|
1868 | 2519 | |
---|
1869 | 2520 | out_buffer_fail: |
---|
1870 | 2521 | for (j = i - 1; j >= 0; j--) |
---|
1871 | | - hns3_free_buffer_detach(ring, j); |
---|
| 2522 | + hns3_free_buffer_detach(ring, j, 0); |
---|
1872 | 2523 | return ret; |
---|
1873 | 2524 | } |
---|
1874 | 2525 | |
---|
1875 | | -/* detach a in-used buffer and replace with a reserved one */ |
---|
| 2526 | +/* detach an in-use buffer and replace it with a reserved one */ |
---|
1876 | 2527 | static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, |
---|
1877 | 2528 | struct hns3_desc_cb *res_cb) |
---|
1878 | 2529 | { |
---|
1879 | 2530 | hns3_unmap_buffer(ring, &ring->desc_cb[i]); |
---|
1880 | 2531 | ring->desc_cb[i] = *res_cb; |
---|
1881 | 2532 | ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); |
---|
| 2533 | + ring->desc_cb[i].refill = 1; |
---|
1882 | 2534 | ring->desc[i].rx.bd_base_info = 0; |
---|
1883 | 2535 | } |
---|
1884 | 2536 | |
---|
1885 | 2537 | static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) |
---|
1886 | 2538 | { |
---|
1887 | 2539 | ring->desc_cb[i].reuse_flag = 0; |
---|
1888 | | - ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma |
---|
1889 | | - + ring->desc_cb[i].page_offset); |
---|
| 2540 | + ring->desc_cb[i].refill = 1; |
---|
| 2541 | + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + |
---|
| 2542 | + ring->desc_cb[i].page_offset); |
---|
1890 | 2543 | ring->desc[i].rx.bd_base_info = 0; |
---|
| 2544 | + |
---|
| 2545 | + dma_sync_single_for_device(ring_to_dev(ring), |
---|
| 2546 | + ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, |
---|
| 2547 | + hns3_buf_size(ring), |
---|
| 2548 | + DMA_FROM_DEVICE); |
---|
1891 | 2549 | } |
---|
1892 | 2550 | |
---|
1893 | | -static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, |
---|
1894 | | - int *pkts) |
---|
| 2551 | +static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, |
---|
| 2552 | + int *bytes, int *pkts, int budget) |
---|
1895 | 2553 | { |
---|
1896 | | - struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; |
---|
| 2554 | + /* Pairs with the ring->last_to_use update in hns3_tx_doorbell(); |
---|
| 2555 | + * smp_store_release() is not used there because the doorbell |
---|
| 2556 | + * operation already has the needed barrier. |
---|
| 2557 | + */ |
---|
| 2558 | + int ltu = smp_load_acquire(&ring->last_to_use); |
---|
| 2559 | + int ntc = ring->next_to_clean; |
---|
| 2560 | + struct hns3_desc_cb *desc_cb; |
---|
| 2561 | + bool reclaimed = false; |
---|
| 2562 | + struct hns3_desc *desc; |
---|
1897 | 2563 | |
---|
1898 | | - (*pkts) += (desc_cb->type == DESC_TYPE_SKB); |
---|
1899 | | - (*bytes) += desc_cb->length; |
---|
1900 | | - /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/ |
---|
1901 | | - hns3_free_buffer_detach(ring, ring->next_to_clean); |
---|
| 2564 | + while (ltu != ntc) { |
---|
| 2565 | + desc = &ring->desc[ntc]; |
---|
1902 | 2566 | |
---|
1903 | | - ring_ptr_move_fw(ring, next_to_clean); |
---|
| 2567 | + if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & |
---|
| 2568 | + BIT(HNS3_TXD_VLD_B)) |
---|
| 2569 | + break; |
---|
| 2570 | + |
---|
| 2571 | + desc_cb = &ring->desc_cb[ntc]; |
---|
| 2572 | + (*pkts) += (desc_cb->type == DESC_TYPE_SKB); |
---|
| 2573 | + (*bytes) += desc_cb->length; |
---|
| 2574 | + /* desc_cb will be cleaned after hns3_free_buffer_detach() */ |
---|
| 2575 | + hns3_free_buffer_detach(ring, ntc, budget); |
---|
| 2576 | + |
---|
| 2577 | + if (++ntc == ring->desc_num) |
---|
| 2578 | + ntc = 0; |
---|
| 2579 | + |
---|
| 2580 | + /* Issue prefetch for next Tx descriptor */ |
---|
| 2581 | + prefetch(&ring->desc_cb[ntc]); |
---|
| 2582 | + reclaimed = true; |
---|
| 2583 | + } |
---|
| 2584 | + |
---|
| 2585 | + if (unlikely(!reclaimed)) |
---|
| 2586 | + return false; |
---|
| 2587 | + |
---|
| 2588 | + /* This smp_store_release() pairs with smp_load_acquire() in |
---|
| 2589 | + * ring_space called by hns3_nic_net_xmit. |
---|
| 2590 | + */ |
---|
| 2591 | + smp_store_release(&ring->next_to_clean, ntc); |
---|
| 2592 | + return true; |
---|
1904 | 2593 | } |
---|
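
hns3_nic_reclaim_desc() drops the old head-register read and instead walks from next_to_clean toward last_to_use, stopping at the first descriptor the hardware still owns (VLD bit set). The index handoff relies on acquire/release ordering: the xmit path publishes last_to_use (the doorbell write supplies the barrier), and the clean path release-stores next_to_clean for ring_space() readers. A standalone C11 sketch of that pairing, simplified (no VLD check) and using explicit atomics where the kernel uses smp_load_acquire()/smp_store_release(); all demo_* names are hypothetical:

#include <stdatomic.h>
#include <stdbool.h>

#define DEMO_RING_SIZE 64

struct demo_ring {
	_Atomic int last_to_use;	/* advanced by the xmit path */
	_Atomic int next_to_clean;	/* advanced by the clean path */
};

/* producer: fill descriptors, then release-publish the new tail so a
 * consumer that acquire-loads it also observes the descriptor writes
 */
static void demo_xmit(struct demo_ring *r, int num)
{
	int ltu = atomic_load_explicit(&r->last_to_use,
				       memory_order_relaxed);

	/* ... descriptor writes for slots ltu .. ltu+num-1 go here ... */
	atomic_store_explicit(&r->last_to_use,
			      (ltu + num) % DEMO_RING_SIZE,
			      memory_order_release);
}

/* consumer: reclaim every slot up to the published tail */
static bool demo_clean(struct demo_ring *r)
{
	int ltu = atomic_load_explicit(&r->last_to_use,
				       memory_order_acquire);
	int ntc = atomic_load_explicit(&r->next_to_clean,
				       memory_order_relaxed);

	if (ltu == ntc)
		return false;		/* nothing to reclaim */

	/* ... free buffers for slots ntc .. ltu-1 here ... */
	atomic_store_explicit(&r->next_to_clean, ltu,
			      memory_order_release);
	return true;
}

int main(void)
{
	struct demo_ring r = { 0, 0 };

	demo_xmit(&r, 4);
	return !demo_clean(&r);	/* exits 0 once the slots are reclaimed */
}
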
1905 | 2594 | |
---|
1906 | | -static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) |
---|
| 2595 | +void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) |
---|
1907 | 2596 | { |
---|
1908 | | - int u = ring->next_to_use; |
---|
1909 | | - int c = ring->next_to_clean; |
---|
1910 | | - |
---|
1911 | | - if (unlikely(h > ring->desc_num)) |
---|
1912 | | - return 0; |
---|
1913 | | - |
---|
1914 | | - return u > c ? (h > c && h <= u) : (h > c || h <= u); |
---|
1915 | | -} |
---|
1916 | | - |
---|
1917 | | -bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) |
---|
1918 | | -{ |
---|
1919 | | - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; |
---|
| 2597 | + struct net_device *netdev = ring_to_netdev(ring); |
---|
1920 | 2598 | struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
1921 | 2599 | struct netdev_queue *dev_queue; |
---|
1922 | 2600 | int bytes, pkts; |
---|
1923 | | - int head; |
---|
1924 | | - |
---|
1925 | | - head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); |
---|
1926 | | - rmb(); /* Make sure head is ready before touch any data */ |
---|
1927 | | - |
---|
1928 | | - if (is_ring_empty(ring) || head == ring->next_to_clean) |
---|
1929 | | - return true; /* no data to poll */ |
---|
1930 | | - |
---|
1931 | | - if (unlikely(!is_valid_clean_head(ring, head))) { |
---|
1932 | | - netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, |
---|
1933 | | - ring->next_to_use, ring->next_to_clean); |
---|
1934 | | - |
---|
1935 | | - u64_stats_update_begin(&ring->syncp); |
---|
1936 | | - ring->stats.io_err_cnt++; |
---|
1937 | | - u64_stats_update_end(&ring->syncp); |
---|
1938 | | - return true; |
---|
1939 | | - } |
---|
1940 | 2601 | |
---|
1941 | 2602 | bytes = 0; |
---|
1942 | 2603 | pkts = 0; |
---|
1943 | | - while (head != ring->next_to_clean && budget) { |
---|
1944 | | - hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); |
---|
1945 | | - /* Issue prefetch for next Tx descriptor */ |
---|
1946 | | - prefetch(&ring->desc_cb[ring->next_to_clean]); |
---|
1947 | | - budget--; |
---|
1948 | | - } |
---|
| 2604 | + |
---|
| 2605 | + if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget))) |
---|
| 2606 | + return; |
---|
1949 | 2607 | |
---|
1950 | 2608 | ring->tqp_vector->tx_group.total_bytes += bytes; |
---|
1951 | 2609 | ring->tqp_vector->tx_group.total_packets += pkts; |
---|
.. | .. |
---|
1958 | 2616 | dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); |
---|
1959 | 2617 | netdev_tx_completed_queue(dev_queue, pkts, bytes); |
---|
1960 | 2618 | |
---|
1961 | | - if (unlikely(pkts && netif_carrier_ok(netdev) && |
---|
1962 | | - (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) { |
---|
| 2619 | + if (unlikely(netif_carrier_ok(netdev) && |
---|
| 2620 | + ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { |
---|
1963 | 2621 | /* Make sure that anybody stopping the queue after this |
---|
1964 | 2622 | * sees the new next_to_clean. |
---|
1965 | 2623 | */ |
---|
.. | .. |
---|
1970 | 2628 | ring->stats.restart_queue++; |
---|
1971 | 2629 | } |
---|
1972 | 2630 | } |
---|
1973 | | - |
---|
1974 | | - return !!budget; |
---|
1975 | 2631 | } |
---|
1976 | 2632 | |
---|
1977 | 2633 | static int hns3_desc_unused(struct hns3_enet_ring *ring) |
---|
.. | .. |
---|
1979 | 2635 | int ntc = ring->next_to_clean; |
---|
1980 | 2636 | int ntu = ring->next_to_use; |
---|
1981 | 2637 | |
---|
| 2638 | + if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill)) |
---|
| 2639 | + return ring->desc_num; |
---|
| 2640 | + |
---|
1982 | 2641 | return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; |
---|
1983 | 2642 | } |
---|
1984 | 2643 | |
---|
1985 | | -static void |
---|
1986 | | -hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) |
---|
| 2644 | +/* Return true if there is any allocation failure */ |
---|
| 2645 | +static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, |
---|
| 2646 | + int cleand_count) |
---|
1987 | 2647 | { |
---|
1988 | 2648 | struct hns3_desc_cb *desc_cb; |
---|
1989 | 2649 | struct hns3_desc_cb res_cbs; |
---|
.. | .. |
---|
1998 | 2658 | |
---|
1999 | 2659 | hns3_reuse_buffer(ring, ring->next_to_use); |
---|
2000 | 2660 | } else { |
---|
2001 | | - ret = hns3_reserve_buffer_map(ring, &res_cbs); |
---|
| 2661 | + ret = hns3_alloc_and_map_buffer(ring, &res_cbs); |
---|
2002 | 2662 | if (ret) { |
---|
2003 | 2663 | u64_stats_update_begin(&ring->syncp); |
---|
2004 | 2664 | ring->stats.sw_err_cnt++; |
---|
2005 | 2665 | u64_stats_update_end(&ring->syncp); |
---|
2006 | 2666 | |
---|
2007 | | - netdev_err(ring->tqp->handle->kinfo.netdev, |
---|
2008 | | - "hnae reserve buffer map failed.\n"); |
---|
2009 | | - break; |
---|
| 2667 | + hns3_rl_err(ring_to_netdev(ring), |
---|
| 2668 | + "alloc rx buffer failed: %d\n", |
---|
| 2669 | + ret); |
---|
| 2670 | + |
---|
| 2671 | + writel(i, ring->tqp->io_base + |
---|
| 2672 | + HNS3_RING_RX_RING_HEAD_REG); |
---|
| 2673 | + return true; |
---|
2010 | 2674 | } |
---|
2011 | 2675 | hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); |
---|
| 2676 | + |
---|
| 2677 | + u64_stats_update_begin(&ring->syncp); |
---|
| 2678 | + ring->stats.non_reuse_pg++; |
---|
| 2679 | + u64_stats_update_end(&ring->syncp); |
---|
2012 | 2680 | } |
---|
2013 | 2681 | |
---|
2014 | 2682 | ring_ptr_move_fw(ring, next_to_use); |
---|
2015 | 2683 | } |
---|
2016 | 2684 | |
---|
2017 | | - wmb(); /* Make all data has been write before submit */ |
---|
2018 | | - writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); |
---|
| 2685 | + writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); |
---|
| 2686 | + return false; |
---|
| 2687 | +} |
---|
| 2688 | + |
---|
| 2689 | +static bool hns3_page_is_reusable(struct page *page) |
---|
| 2690 | +{ |
---|
| 2691 | + return page_to_nid(page) == numa_mem_id() && |
---|
| 2692 | + !page_is_pfmemalloc(page); |
---|
| 2693 | +} |
---|
| 2694 | + |
---|
| 2695 | +static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) |
---|
| 2696 | +{ |
---|
| 2697 | + return (page_count(cb->priv) - cb->pagecnt_bias) == 1; |
---|
2019 | 2698 | } |
---|
2020 | 2699 | |
---|
2021 | 2700 | static void hns3_nic_reuse_page(struct sk_buff *skb, int i, |
---|
2022 | 2701 | struct hns3_enet_ring *ring, int pull_len, |
---|
2023 | 2702 | struct hns3_desc_cb *desc_cb) |
---|
2024 | 2703 | { |
---|
2025 | | - struct hns3_desc *desc; |
---|
2026 | | - u32 truesize; |
---|
2027 | | - int size; |
---|
2028 | | - int last_offset; |
---|
2029 | | - bool twobufs; |
---|
| 2704 | + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; |
---|
| 2705 | + int size = le16_to_cpu(desc->rx.size); |
---|
| 2706 | + u32 truesize = hns3_buf_size(ring); |
---|
2030 | 2707 | |
---|
2031 | | - twobufs = ((PAGE_SIZE < 8192) && |
---|
2032 | | - hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048); |
---|
2033 | | - |
---|
2034 | | - desc = &ring->desc[ring->next_to_clean]; |
---|
2035 | | - size = le16_to_cpu(desc->rx.size); |
---|
2036 | | - |
---|
2037 | | - truesize = hnae3_buf_size(ring); |
---|
2038 | | - |
---|
2039 | | - if (!twobufs) |
---|
2040 | | - last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring); |
---|
2041 | | - |
---|
| 2708 | + desc_cb->pagecnt_bias--; |
---|
2042 | 2709 | skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, |
---|
2043 | 2710 | size - pull_len, truesize); |
---|
2044 | 2711 | |
---|
2045 | | - /* Avoid re-using remote pages,flag default unreuse */ |
---|
2046 | | - if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) |
---|
2047 | | - return; |
---|
2048 | | - |
---|
2049 | | - if (twobufs) { |
---|
2050 | | - /* If we are only owner of page we can reuse it */ |
---|
2051 | | - if (likely(page_count(desc_cb->priv) == 1)) { |
---|
2052 | | - /* Flip page offset to other buffer */ |
---|
2053 | | - desc_cb->page_offset ^= truesize; |
---|
2054 | | - |
---|
2055 | | - desc_cb->reuse_flag = 1; |
---|
2056 | | - /* bump ref count on page before it is given*/ |
---|
2057 | | - get_page(desc_cb->priv); |
---|
2058 | | - } |
---|
| 2712 | + /* Avoid reusing remote pages, or pages the stack is still using |
---|
| 2713 | + * when page_offset rolls back to zero; default to not reusing. |
---|
| 2714 | + */ |
---|
| 2715 | + if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) || |
---|
| 2716 | + (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) { |
---|
| 2717 | + __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); |
---|
2059 | 2718 | return; |
---|
2060 | 2719 | } |
---|
2061 | 2720 | |
---|
2062 | 2721 | /* Move offset up to the next cache line */ |
---|
2063 | 2722 | desc_cb->page_offset += truesize; |
---|
2064 | 2723 | |
---|
2065 | | - if (desc_cb->page_offset <= last_offset) { |
---|
| 2724 | + if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) { |
---|
2066 | 2725 | desc_cb->reuse_flag = 1; |
---|
2067 | | - /* Bump ref count on page before it is given*/ |
---|
2068 | | - get_page(desc_cb->priv); |
---|
| 2726 | + } else if (hns3_can_reuse_page(desc_cb)) { |
---|
| 2727 | + desc_cb->reuse_flag = 1; |
---|
| 2728 | + desc_cb->page_offset = 0; |
---|
| 2729 | + } else if (desc_cb->pagecnt_bias) { |
---|
| 2730 | + __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); |
---|
| 2731 | + return; |
---|
| 2732 | + } |
---|
| 2733 | + |
---|
| 2734 | + if (unlikely(!desc_cb->pagecnt_bias)) { |
---|
| 2735 | + page_ref_add(desc_cb->priv, USHRT_MAX); |
---|
| 2736 | + desc_cb->pagecnt_bias = USHRT_MAX; |
---|
2069 | 2737 | } |
---|
2070 | 2738 | } |
---|
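
hns3_nic_reuse_page() advances page_offset by one truesize per fragment; when the next buffer would run past the end of the page it tries to wrap the offset back to zero rather than unconditionally dropping the page, and it re-arms the bulk reference count once pagecnt_bias drains. A sketch of just the offset decision, assuming hypothetical 4K-page/2K-buffer sizes:

#include <stdbool.h>

#define DEMO_PAGE_SIZE	4096U
#define DEMO_BUF_SIZE	2048U	/* truesize of one RX buffer */

struct demo_cb {
	unsigned int page_offset;
	bool reuse_flag;
};

/* stack_done: no one but the driver still references the page */
static void demo_advance(struct demo_cb *cb, bool stack_done)
{
	cb->page_offset += DEMO_BUF_SIZE;

	if (cb->page_offset + DEMO_BUF_SIZE <= DEMO_PAGE_SIZE) {
		cb->reuse_flag = true;		/* next slice still fits */
	} else if (stack_done) {
		cb->reuse_flag = true;		/* wrap to the page start */
		cb->page_offset = 0;
	} else {
		cb->reuse_flag = false;		/* page busy elsewhere: drop it */
	}
}

int main(void)
{
	struct demo_cb cb = { .page_offset = 0 };

	demo_advance(&cb, false);	/* offset 2048, still fits */
	demo_advance(&cb, true);	/* would overflow, wraps to 0 */
	return cb.page_offset;		/* 0: the page was recycled */
}
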
2071 | 2739 | |
---|
2072 | | -static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, |
---|
2073 | | - struct hns3_desc *desc) |
---|
| 2740 | +static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) |
---|
2074 | 2741 | { |
---|
2075 | | - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; |
---|
2076 | | - int l3_type, l4_type; |
---|
2077 | | - u32 bd_base_info; |
---|
2078 | | - int ol4_type; |
---|
2079 | | - u32 l234info; |
---|
| 2742 | + __be16 type = skb->protocol; |
---|
| 2743 | + struct tcphdr *th; |
---|
| 2744 | + int depth = 0; |
---|
2080 | 2745 | |
---|
2081 | | - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); |
---|
2082 | | - l234info = le32_to_cpu(desc->rx.l234_info); |
---|
| 2746 | + while (eth_type_vlan(type)) { |
---|
| 2747 | + struct vlan_hdr *vh; |
---|
| 2748 | + |
---|
| 2749 | + if ((depth + VLAN_HLEN) > skb_headlen(skb)) |
---|
| 2750 | + return -EFAULT; |
---|
| 2751 | + |
---|
| 2752 | + vh = (struct vlan_hdr *)(skb->data + depth); |
---|
| 2753 | + type = vh->h_vlan_encapsulated_proto; |
---|
| 2754 | + depth += VLAN_HLEN; |
---|
| 2755 | + } |
---|
| 2756 | + |
---|
| 2757 | + skb_set_network_header(skb, depth); |
---|
| 2758 | + |
---|
| 2759 | + if (type == htons(ETH_P_IP)) { |
---|
| 2760 | + const struct iphdr *iph = ip_hdr(skb); |
---|
| 2761 | + |
---|
| 2762 | + depth += sizeof(struct iphdr); |
---|
| 2763 | + skb_set_transport_header(skb, depth); |
---|
| 2764 | + th = tcp_hdr(skb); |
---|
| 2765 | + th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, |
---|
| 2766 | + iph->daddr, 0); |
---|
| 2767 | + } else if (type == htons(ETH_P_IPV6)) { |
---|
| 2768 | + const struct ipv6hdr *iph = ipv6_hdr(skb); |
---|
| 2769 | + |
---|
| 2770 | + depth += sizeof(struct ipv6hdr); |
---|
| 2771 | + skb_set_transport_header(skb, depth); |
---|
| 2772 | + th = tcp_hdr(skb); |
---|
| 2773 | + th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, |
---|
| 2774 | + &iph->daddr, 0); |
---|
| 2775 | + } else { |
---|
| 2776 | + hns3_rl_err(skb->dev, |
---|
| 2777 | + "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", |
---|
| 2778 | + be16_to_cpu(type), depth); |
---|
| 2779 | + return -EFAULT; |
---|
| 2780 | + } |
---|
| 2781 | + |
---|
| 2782 | + skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; |
---|
| 2783 | + if (th->cwr) |
---|
| 2784 | + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; |
---|
| 2785 | + |
---|
| 2786 | + if (l234info & BIT(HNS3_RXD_GRO_FIXID_B)) |
---|
| 2787 | + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; |
---|
| 2788 | + |
---|
| 2789 | + skb->csum_start = (unsigned char *)th - skb->head; |
---|
| 2790 | + skb->csum_offset = offsetof(struct tcphdr, check); |
---|
| 2791 | + skb->ip_summed = CHECKSUM_PARTIAL; |
---|
| 2792 | + |
---|
| 2793 | + trace_hns3_gro(skb); |
---|
| 2794 | + |
---|
| 2795 | + return 0; |
---|
| 2796 | +} |
---|
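
For hardware-GRO'd packets, hns3_gro_complete() seeds th->check with the complement of the TCP pseudo-header checksum (~tcp_v4_check()/~tcp_v6_check()) and marks the skb CHECKSUM_PARTIAL, so the aggregated packet can later be resegmented or forwarded with the checksum finished in software or hardware. A host-byte-order sketch of the IPv4 pseudo-header sum; the kernel operates on network-order fields, so this models the arithmetic only and is not a drop-in replacement:

#include <stdint.h>
#include <stdio.h>

#define DEMO_IPPROTO_TCP 6

/* 16-bit one's-complement sum over the IPv4 pseudo header:
 * saddr, daddr, zero:8 | proto:8, TCP length
 */
static uint16_t demo_tcp_v4_pseudo_sum(uint32_t saddr, uint32_t daddr,
				       uint16_t tcp_len)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += DEMO_IPPROTO_TCP;
	sum += tcp_len;

	while (sum >> 16)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)sum;
}

int main(void)
{
	/* 192.168.0.1 -> 192.168.0.2, 1460 bytes of TCP header+payload */
	uint16_t sum = demo_tcp_v4_pseudo_sum(0xc0a80001, 0xc0a80002, 1460);

	/* the driver stores the complement, as ~tcp_v4_check() does */
	printf("th->check seed = 0x%04x\n", (unsigned)(uint16_t)~sum);
	return 0;
}
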
| 2797 | + |
---|
| 2798 | +static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, |
---|
| 2799 | + u32 l234info, u32 bd_base_info, u32 ol_info) |
---|
| 2800 | +{ |
---|
| 2801 | + struct net_device *netdev = ring_to_netdev(ring); |
---|
| 2802 | + int l3_type, l4_type; |
---|
| 2803 | + int ol4_type; |
---|
2083 | 2804 | |
---|
2084 | 2805 | skb->ip_summed = CHECKSUM_NONE; |
---|
2085 | 2806 | |
---|
.. | .. |
---|
2089 | 2810 | return; |
---|
2090 | 2811 | |
---|
2091 | 2812 | /* check if hardware has done checksum */ |
---|
2092 | | - if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) |
---|
| 2813 | + if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) |
---|
2093 | 2814 | return; |
---|
2094 | 2815 | |
---|
2095 | | - if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) || |
---|
2096 | | - hnae3_get_bit(l234info, HNS3_RXD_L4E_B) || |
---|
2097 | | - hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) || |
---|
2098 | | - hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) { |
---|
2099 | | - netdev_err(netdev, "L3/L4 error pkt\n"); |
---|
| 2816 | + if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | |
---|
| 2817 | + BIT(HNS3_RXD_OL3E_B) | |
---|
| 2818 | + BIT(HNS3_RXD_OL4E_B)))) { |
---|
2100 | 2819 | u64_stats_update_begin(&ring->syncp); |
---|
2101 | 2820 | ring->stats.l3l4_csum_err++; |
---|
2102 | 2821 | u64_stats_update_end(&ring->syncp); |
---|
.. | .. |
---|
2104 | 2823 | return; |
---|
2105 | 2824 | } |
---|
2106 | 2825 | |
---|
2107 | | - l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, |
---|
2108 | | - HNS3_RXD_L3ID_S); |
---|
2109 | | - l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, |
---|
2110 | | - HNS3_RXD_L4ID_S); |
---|
2111 | | - |
---|
2112 | | - ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M, |
---|
| 2826 | + ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M, |
---|
2113 | 2827 | HNS3_RXD_OL4ID_S); |
---|
2114 | 2828 | switch (ol4_type) { |
---|
2115 | 2829 | case HNS3_OL4_TYPE_MAC_IN_UDP: |
---|
2116 | 2830 | case HNS3_OL4_TYPE_NVGRE: |
---|
2117 | 2831 | skb->csum_level = 1; |
---|
2118 | | - /* fall through */ |
---|
| 2832 | + fallthrough; |
---|
2119 | 2833 | case HNS3_OL4_TYPE_NO_TUN: |
---|
| 2834 | + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, |
---|
| 2835 | + HNS3_RXD_L3ID_S); |
---|
| 2836 | + l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, |
---|
| 2837 | + HNS3_RXD_L4ID_S); |
---|
| 2838 | + |
---|
2120 | 2839 | /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ |
---|
2121 | 2840 | if ((l3_type == HNS3_L3_TYPE_IPV4 || |
---|
2122 | 2841 | l3_type == HNS3_L3_TYPE_IPV6) && |
---|
.. | .. |
---|
2125 | 2844 | l4_type == HNS3_L4_TYPE_SCTP)) |
---|
2126 | 2845 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
---|
2127 | 2846 | break; |
---|
| 2847 | + default: |
---|
| 2848 | + break; |
---|
2128 | 2849 | } |
---|
2129 | 2850 | } |
---|
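
In the switch above, UDP-based (VXLAN-style) and NVGRE tunnels bump skb->csum_level to 1 before falling through to the common inner L3/L4 check, telling the stack that CHECKSUM_UNNECESSARY also covers one level of encapsulated headers. A compact sketch of the decision, with demo_* constants standing in for the HNS3_OL4_TYPE_* values:

enum demo_ol4 {
	DEMO_OL4_NO_TUN,
	DEMO_OL4_MAC_IN_UDP,	/* e.g. VXLAN / GENEVE */
	DEMO_OL4_NVGRE,
	DEMO_OL4_UNKNOWN,
};

/* returns the csum_level to report, or -1 to leave CHECKSUM_NONE;
 * inner_l3l4_known models the IPv4/IPv6 + TCP/UDP/SCTP check
 */
static int demo_csum_level(enum demo_ol4 ol4, int inner_l3l4_known)
{
	int level = 0;

	switch (ol4) {
	case DEMO_OL4_MAC_IN_UDP:
	case DEMO_OL4_NVGRE:
		level = 1;	/* HW validated the inner packet too */
		/* fallthrough */
	case DEMO_OL4_NO_TUN:
		return inner_l3l4_known ? level : -1;
	default:
		return -1;	/* unknown tunnel: no offload result */
	}
}

int main(void)
{
	/* a VXLAN packet whose inner headers the HW recognized */
	return demo_csum_level(DEMO_OL4_MAC_IN_UDP, 1) == 1 ? 0 : 1;
}
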
2130 | 2851 | |
---|
2131 | 2852 | static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) |
---|
2132 | 2853 | { |
---|
| 2854 | + if (skb_has_frag_list(skb)) |
---|
| 2855 | + napi_gro_flush(&ring->tqp_vector->napi, false); |
---|
| 2856 | + |
---|
2133 | 2857 | napi_gro_receive(&ring->tqp_vector->napi, skb); |
---|
2134 | 2858 | } |
---|
2135 | 2859 | |
---|
.. | .. |
---|
2137 | 2861 | struct hns3_desc *desc, u32 l234info, |
---|
2138 | 2862 | u16 *vlan_tag) |
---|
2139 | 2863 | { |
---|
| 2864 | + struct hnae3_handle *handle = ring->tqp->handle; |
---|
2140 | 2865 | struct pci_dev *pdev = ring->tqp->handle->pdev; |
---|
| 2866 | + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
---|
2141 | 2867 | |
---|
2142 | | - if (pdev->revision == 0x20) { |
---|
| 2868 | + if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) { |
---|
2143 | 2869 | *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); |
---|
2144 | 2870 | if (!(*vlan_tag & VLAN_VID_MASK)) |
---|
2145 | 2871 | *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); |
---|
.. | .. |
---|
2149 | 2875 | |
---|
2150 | 2876 | #define HNS3_STRP_OUTER_VLAN 0x1 |
---|
2151 | 2877 | #define HNS3_STRP_INNER_VLAN 0x2 |
---|
| 2878 | +#define HNS3_STRP_BOTH 0x3 |
---|
2152 | 2879 | |
---|
| 2880 | + /* Hardware always inserts the VLAN tag into the RX descriptor |
---|
| 2881 | + * when it strips the tag from the packet, so the driver needs to |
---|
| 2882 | + * determine which tag to report to the stack. |
---|
| 2883 | + */ |
---|
2153 | 2884 | switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, |
---|
2154 | 2885 | HNS3_RXD_STRP_TAGP_S)) { |
---|
2155 | 2886 | case HNS3_STRP_OUTER_VLAN: |
---|
| 2887 | + if (handle->port_base_vlan_state != |
---|
| 2888 | + HNAE3_PORT_BASE_VLAN_DISABLE) |
---|
| 2889 | + return false; |
---|
| 2890 | + |
---|
2156 | 2891 | *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); |
---|
2157 | 2892 | return true; |
---|
2158 | 2893 | case HNS3_STRP_INNER_VLAN: |
---|
| 2894 | + if (handle->port_base_vlan_state != |
---|
| 2895 | + HNAE3_PORT_BASE_VLAN_DISABLE) |
---|
| 2896 | + return false; |
---|
| 2897 | + |
---|
2159 | 2898 | *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); |
---|
| 2899 | + return true; |
---|
| 2900 | + case HNS3_STRP_BOTH: |
---|
| 2901 | + if (handle->port_base_vlan_state == |
---|
| 2902 | + HNAE3_PORT_BASE_VLAN_DISABLE) |
---|
| 2903 | + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); |
---|
| 2904 | + else |
---|
| 2905 | + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); |
---|
| 2906 | + |
---|
2160 | 2907 | return true; |
---|
2161 | 2908 | default: |
---|
2162 | 2909 | return false; |
---|
2163 | 2910 | } |
---|
2164 | 2911 | } |
---|
2165 | 2912 | |
---|
2166 | | -static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, |
---|
2167 | | - struct sk_buff **out_skb, int *out_bnum) |
---|
| 2913 | +static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring) |
---|
2168 | 2914 | { |
---|
2169 | | - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; |
---|
2170 | | - struct hns3_desc_cb *desc_cb; |
---|
2171 | | - struct hns3_desc *desc; |
---|
| 2915 | + ring->desc[ring->next_to_clean].rx.bd_base_info &= |
---|
| 2916 | + cpu_to_le32(~BIT(HNS3_RXD_VLD_B)); |
---|
| 2917 | + ring->desc_cb[ring->next_to_clean].refill = 0; |
---|
| 2918 | + ring->next_to_clean += 1; |
---|
| 2919 | + |
---|
| 2920 | + if (unlikely(ring->next_to_clean == ring->desc_num)) |
---|
| 2921 | + ring->next_to_clean = 0; |
---|
| 2922 | +} |
---|
| 2923 | + |
---|
| 2924 | +static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, |
---|
| 2925 | + unsigned char *va) |
---|
| 2926 | +{ |
---|
| 2927 | + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; |
---|
| 2928 | + struct net_device *netdev = ring_to_netdev(ring); |
---|
2172 | 2929 | struct sk_buff *skb; |
---|
2173 | | - unsigned char *va; |
---|
2174 | | - u32 bd_base_info; |
---|
2175 | | - int pull_len; |
---|
2176 | | - u32 l234info; |
---|
2177 | | - int length; |
---|
2178 | | - int bnum; |
---|
2179 | 2930 | |
---|
2180 | | - desc = &ring->desc[ring->next_to_clean]; |
---|
2181 | | - desc_cb = &ring->desc_cb[ring->next_to_clean]; |
---|
2182 | | - |
---|
2183 | | - prefetch(desc); |
---|
2184 | | - |
---|
2185 | | - length = le16_to_cpu(desc->rx.size); |
---|
2186 | | - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); |
---|
2187 | | - |
---|
2188 | | - /* Check valid BD */ |
---|
2189 | | - if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) |
---|
2190 | | - return -EFAULT; |
---|
2191 | | - |
---|
2192 | | - va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; |
---|
2193 | | - |
---|
2194 | | - /* Prefetch first cache line of first page |
---|
2195 | | - * Idea is to cache few bytes of the header of the packet. Our L1 Cache |
---|
2196 | | - * line size is 64B so need to prefetch twice to make it 128B. But in |
---|
2197 | | - * actual we can have greater size of caches with 128B Level 1 cache |
---|
2198 | | - * lines. In such a case, single fetch would suffice to cache in the |
---|
2199 | | - * relevant part of the header. |
---|
2200 | | - */ |
---|
2201 | | - prefetch(va); |
---|
2202 | | -#if L1_CACHE_BYTES < 128 |
---|
2203 | | - prefetch(va + L1_CACHE_BYTES); |
---|
2204 | | -#endif |
---|
2205 | | - |
---|
2206 | | - skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, |
---|
2207 | | - HNS3_RX_HEAD_SIZE); |
---|
| 2931 | + ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); |
---|
| 2932 | + skb = ring->skb; |
---|
2208 | 2933 | if (unlikely(!skb)) { |
---|
2209 | | - netdev_err(netdev, "alloc rx skb fail\n"); |
---|
| 2934 | + hns3_rl_err(netdev, "alloc rx skb fail\n"); |
---|
2210 | 2935 | |
---|
2211 | 2936 | u64_stats_update_begin(&ring->syncp); |
---|
2212 | 2937 | ring->stats.sw_err_cnt++; |
---|
.. | .. |
---|
2215 | 2940 | return -ENOMEM; |
---|
2216 | 2941 | } |
---|
2217 | 2942 | |
---|
| 2943 | + trace_hns3_rx_desc(ring); |
---|
2218 | 2944 | prefetchw(skb->data); |
---|
2219 | 2945 | |
---|
2220 | | - bnum = 1; |
---|
| 2946 | + ring->pending_buf = 1; |
---|
| 2947 | + ring->frag_num = 0; |
---|
| 2948 | + ring->tail_skb = NULL; |
---|
2221 | 2949 | if (length <= HNS3_RX_HEAD_SIZE) { |
---|
2222 | 2950 | memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); |
---|
2223 | 2951 | |
---|
2224 | 2952 | /* We can reuse buffer as-is, just make sure it is local */ |
---|
2225 | | - if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) |
---|
| 2953 | + if (likely(hns3_page_is_reusable(desc_cb->priv))) |
---|
2226 | 2954 | desc_cb->reuse_flag = 1; |
---|
2227 | 2955 | else /* This page cannot be reused so discard it */ |
---|
2228 | | - put_page(desc_cb->priv); |
---|
| 2956 | + __page_frag_cache_drain(desc_cb->priv, |
---|
| 2957 | + desc_cb->pagecnt_bias); |
---|
2229 | 2958 | |
---|
2230 | | - ring_ptr_move_fw(ring, next_to_clean); |
---|
2231 | | - } else { |
---|
2232 | | - u64_stats_update_begin(&ring->syncp); |
---|
2233 | | - ring->stats.seg_pkt_cnt++; |
---|
2234 | | - u64_stats_update_end(&ring->syncp); |
---|
| 2959 | + hns3_rx_ring_move_fw(ring); |
---|
| 2960 | + return 0; |
---|
| 2961 | + } |
---|
| 2962 | + u64_stats_update_begin(&ring->syncp); |
---|
| 2963 | + ring->stats.seg_pkt_cnt++; |
---|
| 2964 | + u64_stats_update_end(&ring->syncp); |
---|
2235 | 2965 | |
---|
2236 | | - pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE); |
---|
| 2966 | + ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); |
---|
| 2967 | + __skb_put(skb, ring->pull_len); |
---|
| 2968 | + hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, |
---|
| 2969 | + desc_cb); |
---|
| 2970 | + hns3_rx_ring_move_fw(ring); |
---|
2237 | 2971 | |
---|
2238 | | - memcpy(__skb_put(skb, pull_len), va, |
---|
2239 | | - ALIGN(pull_len, sizeof(long))); |
---|
| 2972 | + return 0; |
---|
| 2973 | +} |
---|
2240 | 2974 | |
---|
2241 | | - hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); |
---|
2242 | | - ring_ptr_move_fw(ring, next_to_clean); |
---|
| 2975 | +static int hns3_add_frag(struct hns3_enet_ring *ring) |
---|
| 2976 | +{ |
---|
| 2977 | + struct sk_buff *skb = ring->skb; |
---|
| 2978 | + struct sk_buff *head_skb = skb; |
---|
| 2979 | + struct sk_buff *new_skb; |
---|
| 2980 | + struct hns3_desc_cb *desc_cb; |
---|
| 2981 | + struct hns3_desc *desc; |
---|
| 2982 | + u32 bd_base_info; |
---|
2243 | 2983 | |
---|
2244 | | - while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { |
---|
2245 | | - desc = &ring->desc[ring->next_to_clean]; |
---|
2246 | | - desc_cb = &ring->desc_cb[ring->next_to_clean]; |
---|
2247 | | - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); |
---|
2248 | | - hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); |
---|
2249 | | - ring_ptr_move_fw(ring, next_to_clean); |
---|
2250 | | - bnum++; |
---|
| 2984 | + do { |
---|
| 2985 | + desc = &ring->desc[ring->next_to_clean]; |
---|
| 2986 | + desc_cb = &ring->desc_cb[ring->next_to_clean]; |
---|
| 2987 | + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); |
---|
| 2989 | + /* make sure HW has finished writing the descriptor */ |
---|
| 2989 | + dma_rmb(); |
---|
| 2990 | + if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) |
---|
| 2991 | + return -ENXIO; |
---|
| 2992 | + |
---|
| 2993 | + if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { |
---|
| 2994 | + new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); |
---|
| 2995 | + if (unlikely(!new_skb)) { |
---|
| 2996 | + hns3_rl_err(ring_to_netdev(ring), |
---|
| 2997 | + "alloc rx fraglist skb fail\n"); |
---|
| 2998 | + return -ENXIO; |
---|
| 2999 | + } |
---|
| 3000 | + ring->frag_num = 0; |
---|
| 3001 | + |
---|
| 3002 | + if (ring->tail_skb) { |
---|
| 3003 | + ring->tail_skb->next = new_skb; |
---|
| 3004 | + ring->tail_skb = new_skb; |
---|
| 3005 | + } else { |
---|
| 3006 | + skb_shinfo(skb)->frag_list = new_skb; |
---|
| 3007 | + ring->tail_skb = new_skb; |
---|
| 3008 | + } |
---|
2251 | 3009 | } |
---|
| 3010 | + |
---|
| 3011 | + if (ring->tail_skb) { |
---|
| 3012 | + head_skb->truesize += hns3_buf_size(ring); |
---|
| 3013 | + head_skb->data_len += le16_to_cpu(desc->rx.size); |
---|
| 3014 | + head_skb->len += le16_to_cpu(desc->rx.size); |
---|
| 3015 | + skb = ring->tail_skb; |
---|
| 3016 | + } |
---|
| 3017 | + |
---|
| 3018 | + dma_sync_single_for_cpu(ring_to_dev(ring), |
---|
| 3019 | + desc_cb->dma + desc_cb->page_offset, |
---|
| 3020 | + hns3_buf_size(ring), |
---|
| 3021 | + DMA_FROM_DEVICE); |
---|
| 3022 | + |
---|
| 3023 | + hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); |
---|
| 3024 | + trace_hns3_rx_desc(ring); |
---|
| 3025 | + hns3_rx_ring_move_fw(ring); |
---|
| 3026 | + ring->pending_buf++; |
---|
| 3027 | + } while (!(bd_base_info & BIT(HNS3_RXD_FE_B))); |
---|
| 3028 | + |
---|
| 3029 | + return 0; |
---|
| 3030 | +} |
---|
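
hns3_add_frag() handles packets spanning more BDs than one skb can carry as page fragments: once ring->frag_num reaches MAX_SKB_FRAGS it allocates an empty skb, links it through skb_shinfo(head)->frag_list (or the previous tail's next pointer), and keeps appending there, while the head skb's len/data_len account for the whole packet. A toy model of that bookkeeping with a stand-in struct, not the real sk_buff:

#define DEMO_MAX_FRAGS 17	/* models MAX_SKB_FRAGS */

struct demo_skb {
	unsigned int len;		/* whole packet (head skb only) */
	unsigned int data_len;		/* non-linear bytes (head skb only) */
	unsigned int nr_frags;
	struct demo_skb *frag_list;	/* models skb_shinfo()->frag_list */
	struct demo_skb *next;
};

struct demo_ring {
	struct demo_skb *skb;		/* head skb of the packet */
	struct demo_skb *tail_skb;
	unsigned int frag_num;
};

/* append one RX buffer of 'size' bytes; 'spare' is a preallocated
 * empty skb, consumed only when the current skb's frag slots fill up
 */
static void demo_add_frag(struct demo_ring *ring, struct demo_skb *spare,
			  unsigned int size)
{
	struct demo_skb *cur = ring->tail_skb ? ring->tail_skb : ring->skb;

	if (ring->frag_num >= DEMO_MAX_FRAGS) {
		if (ring->tail_skb)
			ring->tail_skb->next = spare;	/* extend the chain */
		else
			ring->skb->frag_list = spare;	/* start the chain */
		ring->tail_skb = spare;
		ring->frag_num = 0;
		cur = spare;
	}

	cur->nr_frags = ++ring->frag_num;

	if (ring->tail_skb) {	/* chained bytes are accounted on the head */
		ring->skb->data_len += size;
		ring->skb->len += size;
	}
}

int main(void)
{
	struct demo_skb head = { 0 }, spare = { 0 };
	struct demo_ring ring = { .skb = &head };
	unsigned int i;

	for (i = 0; i < DEMO_MAX_FRAGS + 2; i++)
		demo_add_frag(&ring, &spare, 2048);

	return head.frag_list == &spare ? 0 : 1;	/* exits 0 */
}
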
| 3031 | + |
---|
| 3032 | +static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, |
---|
| 3033 | + struct sk_buff *skb, u32 l234info, |
---|
| 3034 | + u32 bd_base_info, u32 ol_info) |
---|
| 3035 | +{ |
---|
| 3036 | + u32 l3_type; |
---|
| 3037 | + |
---|
| 3038 | + skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, |
---|
| 3039 | + HNS3_RXD_GRO_SIZE_M, |
---|
| 3040 | + HNS3_RXD_GRO_SIZE_S); |
---|
| 3041 | + /* if there is no HW GRO, do not set gro params */ |
---|
| 3042 | + if (!skb_shinfo(skb)->gso_size) { |
---|
| 3043 | + hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info); |
---|
| 3044 | + return 0; |
---|
2252 | 3045 | } |
---|
2253 | 3046 | |
---|
2254 | | - *out_bnum = bnum; |
---|
| 3047 | + NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, |
---|
| 3048 | + HNS3_RXD_GRO_COUNT_M, |
---|
| 3049 | + HNS3_RXD_GRO_COUNT_S); |
---|
2255 | 3050 | |
---|
| 3051 | + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); |
---|
| 3052 | + if (l3_type == HNS3_L3_TYPE_IPV4) |
---|
| 3053 | + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; |
---|
| 3054 | + else if (l3_type == HNS3_L3_TYPE_IPV6) |
---|
| 3055 | + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; |
---|
| 3056 | + else |
---|
| 3057 | + return -EFAULT; |
---|
| 3058 | + |
---|
| 3059 | + return hns3_gro_complete(skb, l234info); |
---|
| 3060 | +} |
---|
| 3061 | + |
---|
| 3062 | +static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, |
---|
| 3063 | + struct sk_buff *skb, u32 rss_hash) |
---|
| 3064 | +{ |
---|
| 3065 | + struct hnae3_handle *handle = ring->tqp->handle; |
---|
| 3066 | + enum pkt_hash_types rss_type; |
---|
| 3067 | + |
---|
| 3068 | + if (rss_hash) |
---|
| 3069 | + rss_type = handle->kinfo.rss_type; |
---|
| 3070 | + else |
---|
| 3071 | + rss_type = PKT_HASH_TYPE_NONE; |
---|
| 3072 | + |
---|
| 3073 | + skb_set_hash(skb, rss_hash, rss_type); |
---|
| 3074 | +} |
---|
| 3075 | + |
---|
| 3076 | +static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) |
---|
| 3077 | +{ |
---|
| 3078 | + struct net_device *netdev = ring_to_netdev(ring); |
---|
| 3079 | + enum hns3_pkt_l2t_type l2_frame_type; |
---|
| 3080 | + u32 bd_base_info, l234info, ol_info; |
---|
| 3081 | + struct hns3_desc *desc; |
---|
| 3082 | + unsigned int len; |
---|
| 3083 | + int pre_ntc, ret; |
---|
| 3084 | + |
---|
| 3085 | + /* The bdinfo handled below is only valid on the last BD of the |
---|
| 3086 | + * current packet, and ring->next_to_clean indicates the first |
---|
| 3087 | + * descriptor of the next packet, so we step back by one below. |
---|
| 3088 | + */ |
---|
| 3089 | + pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : |
---|
| 3090 | + (ring->desc_num - 1); |
---|
| 3091 | + desc = &ring->desc[pre_ntc]; |
---|
| 3092 | + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); |
---|
2256 | 3093 | l234info = le32_to_cpu(desc->rx.l234_info); |
---|
| 3094 | + ol_info = le32_to_cpu(desc->rx.ol_info); |
---|
2257 | 3095 | |
---|
2258 | 3096 | /* Based on hw strategy, the tag offloaded will be stored at |
---|
2259 | 3097 | * ot_vlan_tag in two layer tag case, and stored at vlan_tag |
---|
.. | .. |
---|
2263 | 3101 | u16 vlan_tag; |
---|
2264 | 3102 | |
---|
2265 | 3103 | if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) |
---|
2266 | | - __vlan_hwaccel_put_tag(skb, |
---|
2267 | | - htons(ETH_P_8021Q), |
---|
| 3104 | + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), |
---|
2268 | 3105 | vlan_tag); |
---|
2269 | 3106 | } |
---|
2270 | 3107 | |
---|
2271 | | - if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { |
---|
2272 | | - netdev_err(netdev, "no valid bd,%016llx,%016llx\n", |
---|
2273 | | - ((u64 *)desc)[0], ((u64 *)desc)[1]); |
---|
| 3108 | + if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | |
---|
| 3109 | + BIT(HNS3_RXD_L2E_B))))) { |
---|
2274 | 3110 | u64_stats_update_begin(&ring->syncp); |
---|
2275 | | - ring->stats.non_vld_descs++; |
---|
| 3111 | + if (l234info & BIT(HNS3_RXD_L2E_B)) |
---|
| 3112 | + ring->stats.l2_err++; |
---|
| 3113 | + else |
---|
| 3114 | + ring->stats.err_pkt_len++; |
---|
2276 | 3115 | u64_stats_update_end(&ring->syncp); |
---|
2277 | 3116 | |
---|
2278 | | - dev_kfree_skb_any(skb); |
---|
2279 | | - return -EINVAL; |
---|
2280 | | - } |
---|
2281 | | - |
---|
2282 | | - if (unlikely((!desc->rx.pkt_len) || |
---|
2283 | | - hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { |
---|
2284 | | - netdev_err(netdev, "truncated pkt\n"); |
---|
2285 | | - u64_stats_update_begin(&ring->syncp); |
---|
2286 | | - ring->stats.err_pkt_len++; |
---|
2287 | | - u64_stats_update_end(&ring->syncp); |
---|
2288 | | - |
---|
2289 | | - dev_kfree_skb_any(skb); |
---|
2290 | 3117 | return -EFAULT; |
---|
2291 | 3118 | } |
---|
2292 | 3119 | |
---|
2293 | | - if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { |
---|
2294 | | - netdev_err(netdev, "L2 error pkt\n"); |
---|
2295 | | - u64_stats_update_begin(&ring->syncp); |
---|
2296 | | - ring->stats.l2_err++; |
---|
2297 | | - u64_stats_update_end(&ring->syncp); |
---|
| 3120 | + len = skb->len; |
---|
2298 | 3121 | |
---|
2299 | | - dev_kfree_skb_any(skb); |
---|
2300 | | - return -EFAULT; |
---|
| 3122 | + /* set skb->protocol before handing the packet to the IP stack */ |
---|
| 3123 | + skb->protocol = eth_type_trans(skb, netdev); |
---|
| 3124 | + |
---|
| 3125 | + /* This is needed in order to enable forwarding support */ |
---|
| 3126 | + ret = hns3_set_gro_and_checksum(ring, skb, l234info, |
---|
| 3127 | + bd_base_info, ol_info); |
---|
| 3128 | + if (unlikely(ret)) { |
---|
| 3129 | + u64_stats_update_begin(&ring->syncp); |
---|
| 3130 | + ring->stats.rx_err_cnt++; |
---|
| 3131 | + u64_stats_update_end(&ring->syncp); |
---|
| 3132 | + return ret; |
---|
2301 | 3133 | } |
---|
| 3134 | + |
---|
| 3135 | + l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, |
---|
| 3136 | + HNS3_RXD_DMAC_S); |
---|
2302 | 3137 | |
---|
2303 | 3138 | u64_stats_update_begin(&ring->syncp); |
---|
2304 | 3139 | ring->stats.rx_pkts++; |
---|
2305 | | - ring->stats.rx_bytes += skb->len; |
---|
| 3140 | + ring->stats.rx_bytes += len; |
---|
| 3141 | + |
---|
| 3142 | + if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) |
---|
| 3143 | + ring->stats.rx_multicast++; |
---|
| 3144 | + |
---|
2306 | 3145 | u64_stats_update_end(&ring->syncp); |
---|
2307 | 3146 | |
---|
2308 | | - ring->tqp_vector->rx_group.total_bytes += skb->len; |
---|
| 3147 | + ring->tqp_vector->rx_group.total_bytes += len; |
---|
2309 | 3148 | |
---|
2310 | | - hns3_rx_checksum(ring, skb, desc); |
---|
| 3149 | + hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); |
---|
2311 | 3150 | return 0; |
---|
2312 | 3151 | } |
---|
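
The per-packet metadata parsed in hns3_handle_bdinfo() lives only in the last BD of the packet, while ring->next_to_clean already points at the first BD of the next one; hence the wrap-safe step back at the top of the function. The computation in isolation, as a hypothetical helper:

/* wrap-safe previous ring index: slot 0 steps back to desc_num - 1;
 * equivalent to (ntc + desc_num - 1) % desc_num
 */
static int demo_prev_idx(int next_to_clean, int desc_num)
{
	return next_to_clean ? next_to_clean - 1 : desc_num - 1;
}

int main(void)
{
	return demo_prev_idx(0, 512) == 511 ? 0 : 1;	/* exits 0 */
}
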
2313 | 3152 | |
---|
2314 | | -int hns3_clean_rx_ring( |
---|
2315 | | - struct hns3_enet_ring *ring, int budget, |
---|
2316 | | - void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) |
---|
| 3153 | +static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) |
---|
| 3154 | +{ |
---|
| 3155 | + struct sk_buff *skb = ring->skb; |
---|
| 3156 | + struct hns3_desc_cb *desc_cb; |
---|
| 3157 | + struct hns3_desc *desc; |
---|
| 3158 | + unsigned int length; |
---|
| 3159 | + u32 bd_base_info; |
---|
| 3160 | + int ret; |
---|
| 3161 | + |
---|
| 3162 | + desc = &ring->desc[ring->next_to_clean]; |
---|
| 3163 | + desc_cb = &ring->desc_cb[ring->next_to_clean]; |
---|
| 3164 | + |
---|
| 3165 | + prefetch(desc); |
---|
| 3166 | + |
---|
| 3167 | + if (!skb) { |
---|
| 3168 | + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); |
---|
| 3169 | + |
---|
| 3170 | + /* Check valid BD */ |
---|
| 3171 | + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) |
---|
| 3172 | + return -ENXIO; |
---|
| 3173 | + |
---|
| 3174 | + dma_rmb(); |
---|
| 3175 | + length = le16_to_cpu(desc->rx.size); |
---|
| 3176 | + |
---|
| 3177 | + ring->va = desc_cb->buf + desc_cb->page_offset; |
---|
| 3178 | + |
---|
| 3179 | + dma_sync_single_for_cpu(ring_to_dev(ring), |
---|
| 3180 | + desc_cb->dma + desc_cb->page_offset, |
---|
| 3181 | + hns3_buf_size(ring), |
---|
| 3182 | + DMA_FROM_DEVICE); |
---|
| 3183 | + |
---|
| 3184 | + /* Prefetch first cache line of first page. |
---|
| 3185 | + * The idea is to cache a few bytes of the packet header. |
---|
| 3186 | + * Our L1 cache line size is 64B, so we need to prefetch twice |
---|
| 3187 | + * to cover 128B. CPUs with 128B L1 cache lines exist, and on |
---|
| 3188 | + * those a single fetch suffices to cache the relevant part |
---|
| 3189 | + * of the header. |
---|
| 3190 | + */ |
---|
| 3191 | + net_prefetch(ring->va); |
---|
| 3192 | + |
---|
| 3193 | + ret = hns3_alloc_skb(ring, length, ring->va); |
---|
| 3194 | + skb = ring->skb; |
---|
| 3195 | + |
---|
| 3196 | + if (ret < 0) /* alloc buffer fail */ |
---|
| 3197 | + return ret; |
---|
| 3198 | + if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */ |
---|
| 3199 | + ret = hns3_add_frag(ring); |
---|
| 3200 | + if (ret) |
---|
| 3201 | + return ret; |
---|
| 3202 | + } |
---|
| 3203 | + } else { |
---|
| 3204 | + ret = hns3_add_frag(ring); |
---|
| 3205 | + if (ret) |
---|
| 3206 | + return ret; |
---|
| 3207 | + } |
---|
| 3208 | + |
---|
| 3209 | + /* As the head data may be changed when GRO enable, copy |
---|
| 3210 | + * the head data in after other data rx completed |
---|
| 3211 | + */ |
---|
| 3212 | + if (skb->len > HNS3_RX_HEAD_SIZE) |
---|
| 3213 | + memcpy(skb->data, ring->va, |
---|
| 3214 | + ALIGN(ring->pull_len, sizeof(long))); |
---|
| 3215 | + |
---|
| 3216 | + ret = hns3_handle_bdinfo(ring, skb); |
---|
| 3217 | + if (unlikely(ret)) { |
---|
| 3218 | + dev_kfree_skb_any(skb); |
---|
| 3219 | + return ret; |
---|
| 3220 | + } |
---|
| 3221 | + |
---|
| 3222 | + skb_record_rx_queue(skb, ring->tqp->tqp_index); |
---|
| 3223 | + return 0; |
---|
| 3224 | +} |
---|
| 3225 | + |
---|
| 3226 | +int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, |
---|
| 3227 | + void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) |
---|
2317 | 3228 | { |
---|
2318 | 3229 | #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 |
---|
2319 | | - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; |
---|
2320 | | - int recv_pkts, recv_bds, clean_count, err; |
---|
2321 | 3230 | int unused_count = hns3_desc_unused(ring); |
---|
2322 | | - struct sk_buff *skb = NULL; |
---|
2323 | | - int num, bnum = 0; |
---|
| 3231 | + bool failure = false; |
---|
| 3232 | + int recv_pkts = 0; |
---|
| 3233 | + int err; |
---|
2324 | 3234 | |
---|
2325 | | - num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); |
---|
2326 | | - rmb(); /* Make sure num taken effect before the other data is touched */ |
---|
| 3235 | + unused_count -= ring->pending_buf; |
---|
2327 | 3236 | |
---|
2328 | | - recv_pkts = 0, recv_bds = 0, clean_count = 0; |
---|
2329 | | - num -= unused_count; |
---|
2330 | | - |
---|
2331 | | - while (recv_pkts < budget && recv_bds < num) { |
---|
| 3237 | + while (recv_pkts < budget) { |
---|
2332 | 3238 | /* Reuse or realloc buffers */ |
---|
2333 | | - if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { |
---|
2334 | | - hns3_nic_alloc_rx_buffers(ring, |
---|
2335 | | - clean_count + unused_count); |
---|
2336 | | - clean_count = 0; |
---|
2337 | | - unused_count = hns3_desc_unused(ring); |
---|
| 3239 | + if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { |
---|
| 3240 | + failure = failure || |
---|
| 3241 | + hns3_nic_alloc_rx_buffers(ring, unused_count); |
---|
| 3242 | + unused_count = 0; |
---|
2338 | 3243 | } |
---|
2339 | 3244 | |
---|
2340 | 3245 | /* Poll one pkt */ |
---|
2341 | | - err = hns3_handle_rx_bd(ring, &skb, &bnum); |
---|
2342 | | - if (unlikely(!skb)) /* This fault cannot be repaired */ |
---|
| 3246 | + err = hns3_handle_rx_bd(ring); |
---|
| 3247 | + /* no FE seen for the packet yet, or skb allocation failed */ |
---|
| 3248 | + if (unlikely(!ring->skb || err == -ENXIO)) { |
---|
2343 | 3249 | goto out; |
---|
2344 | | - |
---|
2345 | | - recv_bds += bnum; |
---|
2346 | | - clean_count += bnum; |
---|
2347 | | - if (unlikely(err)) { /* Do jump the err */ |
---|
| 3250 | + } else if (likely(!err)) { |
---|
| 3251 | + rx_fn(ring, ring->skb); |
---|
2348 | 3252 | recv_pkts++; |
---|
2349 | | - continue; |
---|
2350 | 3253 | } |
---|
2351 | 3254 | |
---|
2352 | | - /* Do update ip stack process */ |
---|
2353 | | - skb->protocol = eth_type_trans(skb, netdev); |
---|
2354 | | - rx_fn(ring, skb); |
---|
2355 | | - |
---|
2356 | | - recv_pkts++; |
---|
| 3255 | + unused_count += ring->pending_buf; |
---|
| 3256 | + ring->skb = NULL; |
---|
| 3257 | + ring->pending_buf = 0; |
---|
2357 | 3258 | } |
---|
2358 | 3259 | |
---|
2359 | 3260 | out: |
---|
2360 | | - /* Make all data has been write before submit */ |
---|
2361 | | - if (clean_count + unused_count > 0) |
---|
2362 | | - hns3_nic_alloc_rx_buffers(ring, |
---|
2363 | | - clean_count + unused_count); |
---|
2364 | | - |
---|
2365 | | - return recv_pkts; |
---|
| 3261 | + return failure ? budget : recv_pkts; |
---|
2366 | 3262 | } |
---|
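
hns3_clean_rx_ring() now refills RX buffers in batches of RCB_NOF_ALLOC_RX_BUFF_ONCE and, if any refill fails, reports the full budget back to NAPI so the poll stays scheduled and the allocation is retried on the next pass. A condensed, runnable sketch of that control flow, with hypothetical demo_* stubs in place of the real refill and per-packet handling:

#define DEMO_ALLOC_ONCE 16	/* models RCB_NOF_ALLOC_RX_BUFF_ONCE */

/* hypothetical stubs for the real refill / per-packet work */
static int demo_refill(int count)
{
	(void)count;
	return 0;			/* 0: all buffers allocated */
}

static int demo_poll_one_pkt(void)
{
	static int left = 8;		/* pretend 8 packets are pending */

	return --left >= 0 ? 0 : -1;
}

static int demo_clean_rx(int budget, int unused)
{
	int failure = 0;
	int recv_pkts = 0;

	while (recv_pkts < budget) {
		if (unused >= DEMO_ALLOC_ONCE) {	/* batched refill */
			failure |= demo_refill(unused);
			unused = 0;
		}

		if (demo_poll_one_pkt() < 0)
			break;

		recv_pkts++;
		unused++;	/* consumed BDs become refillable */
	}

	/* claiming the full budget keeps NAPI polling so a failed
	 * refill is retried; otherwise report the real work done
	 */
	return failure ? budget : recv_pkts;
}

int main(void)
{
	return demo_clean_rx(64, 0) == 8 ? 0 : 1;	/* exits 0 */
}
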
2367 | 3263 | |
---|
2368 | | -static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) |
---|
| 3264 | +static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group) |
---|
2369 | 3265 | { |
---|
2370 | | - struct hns3_enet_tqp_vector *tqp_vector = |
---|
2371 | | - ring_group->ring->tqp_vector; |
---|
| 3266 | +#define HNS3_RX_LOW_BYTE_RATE 10000 |
---|
| 3267 | +#define HNS3_RX_MID_BYTE_RATE 20000 |
---|
| 3268 | +#define HNS3_RX_ULTRA_PACKET_RATE 40 |
---|
| 3269 | + |
---|
2372 | 3270 | enum hns3_flow_level_range new_flow_level; |
---|
2373 | | - int packets_per_msecs; |
---|
2374 | | - int bytes_per_msecs; |
---|
| 3271 | + struct hns3_enet_tqp_vector *tqp_vector; |
---|
| 3272 | + int packets_per_msecs, bytes_per_msecs; |
---|
2375 | 3273 | u32 time_passed_ms; |
---|
2376 | | - u16 new_int_gl; |
---|
2377 | 3274 | |
---|
2378 | | - if (!tqp_vector->last_jiffies) |
---|
2379 | | - return false; |
---|
2380 | | - |
---|
2381 | | - if (ring_group->total_packets == 0) { |
---|
2382 | | - ring_group->coal.int_gl = HNS3_INT_GL_50K; |
---|
2383 | | - ring_group->coal.flow_level = HNS3_FLOW_LOW; |
---|
2384 | | - return true; |
---|
2385 | | - } |
---|
2386 | | - |
---|
2387 | | - /* Simple throttlerate management |
---|
2388 | | - * 0-10MB/s lower (50000 ints/s) |
---|
2389 | | - * 10-20MB/s middle (20000 ints/s) |
---|
2390 | | - * 20-1249MB/s high (18000 ints/s) |
---|
2391 | | - * > 40000pps ultra (8000 ints/s) |
---|
2392 | | - */ |
---|
2393 | | - new_flow_level = ring_group->coal.flow_level; |
---|
2394 | | - new_int_gl = ring_group->coal.int_gl; |
---|
| 3275 | + tqp_vector = ring_group->ring->tqp_vector; |
---|
2395 | 3276 | time_passed_ms = |
---|
2396 | 3277 | jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); |
---|
2397 | | - |
---|
2398 | 3278 | if (!time_passed_ms) |
---|
2399 | 3279 | return false; |
---|
2400 | 3280 | |
---|
.. | .. |
---|
2404 | 3284 | do_div(ring_group->total_bytes, time_passed_ms); |
---|
2405 | 3285 | bytes_per_msecs = ring_group->total_bytes; |
---|
2406 | 3286 | |
---|
2407 | | -#define HNS3_RX_LOW_BYTE_RATE 10000 |
---|
2408 | | -#define HNS3_RX_MID_BYTE_RATE 20000 |
---|
| 3287 | + new_flow_level = ring_group->coal.flow_level; |
---|
2409 | 3288 | |
---|
| 3289 | + /* Simple throttlerate management |
---|
| 3290 | + * 0-10MB/s lower (50000 ints/s) |
---|
| 3291 | + * 10-20MB/s middle (20000 ints/s) |
---|
| 3292 | + * 20-1249MB/s high (18000 ints/s) |
---|
| 3293 | + * > 40000pps ultra (8000 ints/s) |
---|
| 3294 | + */ |
---|
2410 | 3295 | switch (new_flow_level) { |
---|
2411 | 3296 | case HNS3_FLOW_LOW: |
---|
2412 | 3297 | if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) |
---|
.. | .. |
---|
2426 | 3311 | break; |
---|
2427 | 3312 | } |
---|
2428 | 3313 | |
---|
2429 | | -#define HNS3_RX_ULTRA_PACKET_RATE 40 |
---|
2430 | | - |
---|
2431 | 3314 | if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && |
---|
2432 | 3315 | &tqp_vector->rx_group == ring_group) |
---|
2433 | 3316 | new_flow_level = HNS3_FLOW_ULTRA; |
---|
2434 | 3317 | |
---|
2435 | | - switch (new_flow_level) { |
---|
| 3318 | + ring_group->total_bytes = 0; |
---|
| 3319 | + ring_group->total_packets = 0; |
---|
| 3320 | + ring_group->coal.flow_level = new_flow_level; |
---|
| 3321 | + |
---|
| 3322 | + return true; |
---|
| 3323 | +} |
---|
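
The throttle table above classifies by bytes per millisecond (10000 B/ms ~ 10 MB/s, 20000 B/ms ~ 20 MB/s) with a packets-per-millisecond override (40 pkt/ms = 40000 pps) for the RX group. A worked example that deliberately collapses the per-level hysteresis of the real switch into plain thresholds, so the numbers can be checked directly:

#include <stdio.h>

enum demo_level { DEMO_LOW, DEMO_MID, DEMO_HIGH, DEMO_ULTRA };

static enum demo_level demo_flow_level(unsigned long long bytes,
				       unsigned long long pkts,
				       unsigned int ms, int is_rx)
{
	unsigned long long bytes_per_ms = bytes / ms;
	unsigned long long pkts_per_ms = pkts / ms;
	enum demo_level lvl;

	if (bytes_per_ms <= 10000)		/* 0-10MB/s  -> 50K ints/s */
		lvl = DEMO_LOW;
	else if (bytes_per_ms <= 20000)		/* 10-20MB/s -> 20K ints/s */
		lvl = DEMO_MID;
	else					/* >20MB/s   -> 18K ints/s */
		lvl = DEMO_HIGH;

	if (is_rx && pkts_per_ms > 40)		/* >40000pps -> 8K ints/s */
		lvl = DEMO_ULTRA;

	return lvl;
}

int main(void)
{
	/* 15 MB and 30000 packets over a 1000 ms sampling window */
	enum demo_level lvl = demo_flow_level(15000000ULL, 30000ULL, 1000, 1);

	printf("flow level = %d (0 low, 1 mid, 2 high, 3 ultra)\n", lvl);
	return 0;
}

With 15 MB and 30000 packets over a 1000 ms window this lands in the middle level, i.e. the 20K interrupts/s gear.
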
| 3324 | + |
---|
| 3325 | +static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) |
---|
| 3326 | +{ |
---|
| 3327 | + struct hns3_enet_tqp_vector *tqp_vector; |
---|
| 3328 | + u16 new_int_gl; |
---|
| 3329 | + |
---|
| 3330 | + if (!ring_group->ring) |
---|
| 3331 | + return false; |
---|
| 3332 | + |
---|
| 3333 | + tqp_vector = ring_group->ring->tqp_vector; |
---|
| 3334 | + if (!tqp_vector->last_jiffies) |
---|
| 3335 | + return false; |
---|
| 3336 | + |
---|
| 3337 | + if (ring_group->total_packets == 0) { |
---|
| 3338 | + ring_group->coal.int_gl = HNS3_INT_GL_50K; |
---|
| 3339 | + ring_group->coal.flow_level = HNS3_FLOW_LOW; |
---|
| 3340 | + return true; |
---|
| 3341 | + } |
---|
| 3342 | + |
---|
| 3343 | + if (!hns3_get_new_flow_lvl(ring_group)) |
---|
| 3344 | + return false; |
---|
| 3345 | + |
---|
| 3346 | + new_int_gl = ring_group->coal.int_gl; |
---|
| 3347 | + switch (ring_group->coal.flow_level) { |
---|
2436 | 3348 | case HNS3_FLOW_LOW: |
---|
2437 | 3349 | new_int_gl = HNS3_INT_GL_50K; |
---|
2438 | 3350 | break; |
---|
.. | .. |
---|
2449 | 3361 | break; |
---|
2450 | 3362 | } |
---|
2451 | 3363 | |
---|
2452 | | - ring_group->total_bytes = 0; |
---|
2453 | | - ring_group->total_packets = 0; |
---|
2454 | | - ring_group->coal.flow_level = new_flow_level; |
---|
2455 | 3364 | if (new_int_gl != ring_group->coal.int_gl) { |
---|
2456 | 3365 | ring_group->coal.int_gl = new_int_gl; |
---|
2457 | 3366 | return true; |
---|
.. | .. |
---|
2465 | 3374 | struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; |
---|
2466 | 3375 | bool rx_update, tx_update; |
---|
2467 | 3376 | |
---|
2468 | | - if (tqp_vector->int_adapt_down > 0) { |
---|
2469 | | - tqp_vector->int_adapt_down--; |
---|
| 3377 | + /* update param every 1000ms */ |
---|
| 3378 | + if (time_before(jiffies, |
---|
| 3379 | + tqp_vector->last_jiffies + msecs_to_jiffies(1000))) |
---|
2470 | 3380 | return; |
---|
2471 | | - } |
---|
2472 | 3381 | |
---|
2473 | 3382 | if (rx_group->coal.gl_adapt_enable) { |
---|
2474 | 3383 | rx_update = hns3_get_new_int_gl(rx_group); |
---|
.. | .. |
---|
2478 | 3387 | } |
---|
2479 | 3388 | |
---|
2480 | 3389 | if (tx_group->coal.gl_adapt_enable) { |
---|
2481 | | - tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group); |
---|
| 3390 | + tx_update = hns3_get_new_int_gl(tx_group); |
---|
2482 | 3391 | if (tx_update) |
---|
2483 | 3392 | hns3_set_vector_coalesce_tx_gl(tqp_vector, |
---|
2484 | 3393 | tx_group->coal.int_gl); |
---|
2485 | 3394 | } |
---|
2486 | 3395 | |
---|
2487 | 3396 | tqp_vector->last_jiffies = jiffies; |
---|
2488 | | - tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; |
---|
2489 | 3397 | } |
---|
2490 | 3398 | |
---|
2491 | 3399 | static int hns3_nic_common_poll(struct napi_struct *napi, int budget) |
---|
2492 | 3400 | { |
---|
| 3401 | + struct hns3_nic_priv *priv = netdev_priv(napi->dev); |
---|
2493 | 3402 | struct hns3_enet_ring *ring; |
---|
2494 | 3403 | int rx_pkt_total = 0; |
---|
2495 | 3404 | |
---|
2496 | 3405 | struct hns3_enet_tqp_vector *tqp_vector = |
---|
2497 | 3406 | container_of(napi, struct hns3_enet_tqp_vector, napi); |
---|
2498 | 3407 | bool clean_complete = true; |
---|
2499 | | - int rx_budget; |
---|
| 3408 | + int rx_budget = budget; |
---|
| 3409 | + |
---|
| 3410 | + if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { |
---|
| 3411 | + napi_complete(napi); |
---|
| 3412 | + return 0; |
---|
| 3413 | + } |
---|
2500 | 3414 | |
---|
2501 | 3415 | /* Since the actual Tx work is minimal, we can give the Tx a larger |
---|
2502 | 3416 | * budget and be more aggressive about cleaning up the Tx descriptors. |
---|
2503 | 3417 | */ |
---|
2504 | | - hns3_for_each_ring(ring, tqp_vector->tx_group) { |
---|
2505 | | - if (!hns3_clean_tx_ring(ring, budget)) |
---|
2506 | | - clean_complete = false; |
---|
2507 | | - } |
---|
| 3418 | + hns3_for_each_ring(ring, tqp_vector->tx_group) |
---|
| 3419 | + hns3_clean_tx_ring(ring, budget); |
---|
2508 | 3420 | |
---|
2509 | 3421 | /* make sure rx ring budget not smaller than 1 */ |
---|
2510 | | - rx_budget = max(budget / tqp_vector->num_tqps, 1); |
---|
| 3422 | + if (tqp_vector->num_tqps > 1) |
---|
| 3423 | + rx_budget = max(budget / tqp_vector->num_tqps, 1); |
---|
2511 | 3424 | |
---|
2512 | 3425 | hns3_for_each_ring(ring, tqp_vector->rx_group) { |
---|
2513 | 3426 | int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, |
---|
.. | .. |
---|
2524 | 3437 | if (!clean_complete) |
---|
2525 | 3438 | return budget; |
---|
2526 | 3439 | |
---|
2527 | | - napi_complete(napi); |
---|
2528 | | - hns3_update_new_int_gl(tqp_vector); |
---|
2529 | | - hns3_mask_vector_irq(tqp_vector, 1); |
---|
| 3440 | + if (napi_complete(napi) && |
---|
| 3441 | + likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { |
---|
| 3442 | + hns3_update_new_int_gl(tqp_vector); |
---|
| 3443 | + hns3_mask_vector_irq(tqp_vector, 1); |
---|
| 3444 | + } |
---|
2530 | 3445 | |
---|
2531 | 3446 | return rx_pkt_total; |
---|
2532 | 3447 | } |
---|
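
hns3_nic_common_poll() splits the NAPI budget evenly across the vector's RX rings, but never below one, and skips the division entirely in the common single-ring case. The guard in miniature, as a hypothetical helper:

/* per-ring RX budget: budget / num_tqps, floored at 1; single-ring
 * vectors keep the whole budget without dividing
 */
static int demo_rx_budget(int budget, int num_tqps)
{
	int rx_budget = budget;

	if (num_tqps > 1) {
		rx_budget = budget / num_tqps;
		if (rx_budget < 1)
			rx_budget = 1;
	}
	return rx_budget;
}

int main(void)
{
	return demo_rx_budget(64, 4) == 16 ? 0 : 1;	/* exits 0 */
}
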
.. | .. |
---|
2638 | 3553 | group->count++; |
---|
2639 | 3554 | } |
---|
2640 | 3555 | |
---|
| 3556 | +static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) |
---|
| 3557 | +{ |
---|
| 3558 | + struct pci_dev *pdev = priv->ae_handle->pdev; |
---|
| 3559 | + struct hns3_enet_tqp_vector *tqp_vector; |
---|
| 3560 | + int num_vectors = priv->vector_num; |
---|
| 3561 | + int numa_node; |
---|
| 3562 | + int vector_i; |
---|
| 3563 | + |
---|
| 3564 | + numa_node = dev_to_node(&pdev->dev); |
---|
| 3565 | + |
---|
| 3566 | + for (vector_i = 0; vector_i < num_vectors; vector_i++) { |
---|
| 3567 | + tqp_vector = &priv->tqp_vector[vector_i]; |
---|
| 3568 | + cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), |
---|
| 3569 | + &tqp_vector->affinity_mask); |
---|
| 3570 | + } |
---|
| 3571 | +} |
---|
| 3572 | + |
---|
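hns3_nic_set_cpumask() derives the NUMA node from the PCI device with dev_to_node() and uses cpumask_local_spread(), which returns the i-th CPU while preferring CPUs on the given node, so interrupt affinity hints stay NUMA-local without hand-rolled modular arithmetic. A sketch under the same assumptions, with a hypothetical demo_vector type:

```c
#include <linux/cpumask.h>

struct demo_vector {
	struct cpumask affinity_mask;
};

/* Spread vector i onto the i-th CPU, preferring CPUs on numa_node. */
static void demo_set_affinity(struct demo_vector *vec, int num_vectors,
			      int numa_node)
{
	int i;

	for (i = 0; i < num_vectors; i++)
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				&vec[i].affinity_mask);
}
```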
2641 | 3573 | static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) |
---|
2642 | 3574 | { |
---|
2643 | 3575 | struct hnae3_handle *h = priv->ae_handle; |
---|
2644 | 3576 | struct hns3_enet_tqp_vector *tqp_vector; |
---|
2645 | | - int ret = 0; |
---|
| 3577 | + int ret; |
---|
2646 | 3578 | int i; |
---|
| 3579 | + |
---|
| 3580 | + hns3_nic_set_cpumask(priv); |
---|
2647 | 3581 | |
---|
2648 | 3582 | for (i = 0; i < priv->vector_num; i++) { |
---|
2649 | 3583 | tqp_vector = &priv->tqp_vector[i]; |
---|
.. | .. |
---|
2658 | 3592 | tqp_vector = &priv->tqp_vector[vector_i]; |
---|
2659 | 3593 | |
---|
2660 | 3594 | hns3_add_ring_to_group(&tqp_vector->tx_group, |
---|
2661 | | - priv->ring_data[i].ring); |
---|
| 3595 | + &priv->ring[i]); |
---|
2662 | 3596 | |
---|
2663 | 3597 | hns3_add_ring_to_group(&tqp_vector->rx_group, |
---|
2664 | | - priv->ring_data[i + tqp_num].ring); |
---|
| 3598 | + &priv->ring[i + tqp_num]); |
---|
2665 | 3599 | |
---|
2666 | | - priv->ring_data[i].ring->tqp_vector = tqp_vector; |
---|
2667 | | - priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; |
---|
| 3600 | + priv->ring[i].tqp_vector = tqp_vector; |
---|
| 3601 | + priv->ring[i + tqp_num].tqp_vector = tqp_vector; |
---|
2668 | 3602 | tqp_vector->num_tqps++; |
---|
2669 | 3603 | } |
---|
2670 | 3604 | |
---|
.. | .. |
---|
2728 | 3662 | if (!vector) |
---|
2729 | 3663 | return -ENOMEM; |
---|
2730 | 3664 | |
---|
| 3665 | + /* save the actual available vector number */ |
---|
2731 | 3666 | vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); |
---|
2732 | 3667 | |
---|
2733 | 3668 | priv->vector_num = vector_num; |
---|
.. | .. |
---|
2758 | 3693 | group->count = 0; |
---|
2759 | 3694 | } |
---|
2760 | 3695 | |
---|
2761 | | -static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) |
---|
| 3696 | +static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) |
---|
2762 | 3697 | { |
---|
2763 | 3698 | struct hnae3_ring_chain_node vector_ring_chain; |
---|
2764 | 3699 | struct hnae3_handle *h = priv->ae_handle; |
---|
2765 | 3700 | struct hns3_enet_tqp_vector *tqp_vector; |
---|
2766 | | - int i, ret; |
---|
| 3701 | + int i; |
---|
2767 | 3702 | |
---|
2768 | 3703 | for (i = 0; i < priv->vector_num; i++) { |
---|
2769 | 3704 | tqp_vector = &priv->tqp_vector[i]; |
---|
2770 | 3705 | |
---|
2771 | | - ret = hns3_get_vector_ring_chain(tqp_vector, |
---|
2772 | | - &vector_ring_chain); |
---|
2773 | | - if (ret) |
---|
2774 | | - return ret; |
---|
| 3706 | + if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) |
---|
| 3707 | + continue; |
---|
2775 | 3708 | |
---|
2776 | | - ret = h->ae_algo->ops->unmap_ring_from_vector(h, |
---|
| 3709 | + /* Since the mapping can be overwritten, when fail to get the |
---|
| 3710 | + * chain between vector and ring, we should go on to deal with |
---|
| 3711 | + * the remaining options. |
---|
| 3712 | + */ |
---|
| 3713 | + if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain)) |
---|
| 3714 | + dev_warn(priv->dev, "failed to get ring chain\n"); |
---|
| 3715 | + |
---|
| 3716 | + h->ae_algo->ops->unmap_ring_from_vector(h, |
---|
2777 | 3717 | tqp_vector->vector_irq, &vector_ring_chain); |
---|
2778 | | - if (ret) |
---|
2779 | | - return ret; |
---|
2780 | 3718 | |
---|
2781 | 3719 | hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); |
---|
2782 | 3720 | |
---|
2783 | | - if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) { |
---|
2784 | | - irq_set_affinity_notifier(tqp_vector->vector_irq, |
---|
2785 | | - NULL); |
---|
2786 | | - irq_set_affinity_hint(tqp_vector->vector_irq, NULL); |
---|
2787 | | - free_irq(tqp_vector->vector_irq, tqp_vector); |
---|
2788 | | - tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED; |
---|
2789 | | - } |
---|
2790 | | - |
---|
2791 | | - priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; |
---|
2792 | 3721 | hns3_clear_ring_group(&tqp_vector->rx_group); |
---|
2793 | 3722 | hns3_clear_ring_group(&tqp_vector->tx_group); |
---|
2794 | 3723 | netif_napi_del(&priv->tqp_vector[i].napi); |
---|
2795 | 3724 | } |
---|
2796 | | - |
---|
2797 | | - return 0; |
---|
2798 | 3725 | } |
---|
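Note the shift from int to void above: hns3_nic_uninit_vector_data() now treats teardown as best-effort, warning on a failed chain lookup and continuing, since aborting mid-loop would leak every vector that follows. A sketch of the pattern, with all demo_* names illustrative:

```c
#include <linux/netdevice.h>

struct demo_vector {
	struct napi_struct napi;
	void *rx_ring, *tx_ring;
};

struct demo_priv {
	struct device *dev;
	struct demo_vector *vector;
	int vector_num;
};

int demo_get_ring_chain(struct demo_vector *v);
void demo_unmap_rings(struct demo_vector *v);

static void demo_uninit_vectors(struct demo_priv *priv)
{
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		struct demo_vector *v = &priv->vector[i];

		if (!v->rx_ring && !v->tx_ring)
			continue;	/* never mapped, nothing to undo */

		/* Warn, but keep going: later vectors still need freeing. */
		if (demo_get_ring_chain(v))
			dev_warn(priv->dev, "failed to get ring chain\n");

		demo_unmap_rings(v);
		netif_napi_del(&v->napi);
	}
}
```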
2799 | 3726 | |
---|
2800 | | -static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) |
---|
| 3727 | +static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) |
---|
2801 | 3728 | { |
---|
2802 | 3729 | struct hnae3_handle *h = priv->ae_handle; |
---|
2803 | 3730 | struct pci_dev *pdev = h->pdev; |
---|
.. | .. |
---|
2809 | 3736 | tqp_vector = &priv->tqp_vector[i]; |
---|
2810 | 3737 | ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); |
---|
2811 | 3738 | if (ret) |
---|
2812 | | - return ret; |
---|
| 3739 | + return; |
---|
2813 | 3740 | } |
---|
2814 | 3741 | |
---|
2815 | 3742 | devm_kfree(&pdev->dev, priv->tqp_vector); |
---|
2816 | | - return 0; |
---|
2817 | 3743 | } |
---|
2818 | 3744 | |
---|
2819 | | -static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, |
---|
2820 | | - int ring_type) |
---|
| 3745 | +static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, |
---|
| 3746 | + unsigned int ring_type) |
---|
2821 | 3747 | { |
---|
2822 | | - struct hns3_nic_ring_data *ring_data = priv->ring_data; |
---|
2823 | 3748 | int queue_num = priv->ae_handle->kinfo.num_tqps; |
---|
2824 | | - struct pci_dev *pdev = priv->ae_handle->pdev; |
---|
2825 | 3749 | struct hns3_enet_ring *ring; |
---|
2826 | | - |
---|
2827 | | - ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); |
---|
2828 | | - if (!ring) |
---|
2829 | | - return -ENOMEM; |
---|
| 3750 | + int desc_num; |
---|
2830 | 3751 | |
---|
2831 | 3752 | if (ring_type == HNAE3_RING_TYPE_TX) { |
---|
2832 | | - ring_data[q->tqp_index].ring = ring; |
---|
2833 | | - ring_data[q->tqp_index].queue_index = q->tqp_index; |
---|
2834 | | - ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; |
---|
| 3753 | + ring = &priv->ring[q->tqp_index]; |
---|
| 3754 | + desc_num = priv->ae_handle->kinfo.num_tx_desc; |
---|
| 3755 | + ring->queue_index = q->tqp_index; |
---|
2835 | 3756 | } else { |
---|
2836 | | - ring_data[q->tqp_index + queue_num].ring = ring; |
---|
2837 | | - ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; |
---|
2838 | | - ring->io_base = q->io_base; |
---|
| 3757 | + ring = &priv->ring[q->tqp_index + queue_num]; |
---|
| 3758 | + desc_num = priv->ae_handle->kinfo.num_rx_desc; |
---|
| 3759 | + ring->queue_index = q->tqp_index; |
---|
2839 | 3760 | } |
---|
2840 | 3761 | |
---|
2841 | 3762 | hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); |
---|
.. | .. |
---|
2846 | 3767 | ring->dev = priv->dev; |
---|
2847 | 3768 | ring->desc_dma_addr = 0; |
---|
2848 | 3769 | ring->buf_size = q->buf_size; |
---|
2849 | | - ring->desc_num = q->desc_num; |
---|
| 3770 | + ring->desc_num = desc_num; |
---|
2850 | 3771 | ring->next_to_use = 0; |
---|
2851 | 3772 | ring->next_to_clean = 0; |
---|
2852 | | - |
---|
2853 | | - return 0; |
---|
| 3773 | + ring->last_to_use = 0; |
---|
2854 | 3774 | } |
---|
2855 | 3775 | |
---|
2856 | | -static int hns3_queue_to_ring(struct hnae3_queue *tqp, |
---|
2857 | | - struct hns3_nic_priv *priv) |
---|
| 3776 | +static void hns3_queue_to_ring(struct hnae3_queue *tqp, |
---|
| 3777 | + struct hns3_nic_priv *priv) |
---|
2858 | 3778 | { |
---|
2859 | | - int ret; |
---|
2860 | | - |
---|
2861 | | - ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); |
---|
2862 | | - if (ret) |
---|
2863 | | - return ret; |
---|
2864 | | - |
---|
2865 | | - ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); |
---|
2866 | | - if (ret) { |
---|
2867 | | - devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring); |
---|
2868 | | - return ret; |
---|
2869 | | - } |
---|
2870 | | - |
---|
2871 | | - return 0; |
---|
| 3779 | + hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); |
---|
| 3780 | + hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); |
---|
2872 | 3781 | } |
---|
2873 | 3782 | |
---|
2874 | 3783 | static int hns3_get_ring_config(struct hns3_nic_priv *priv) |
---|
2875 | 3784 | { |
---|
2876 | 3785 | struct hnae3_handle *h = priv->ae_handle; |
---|
2877 | 3786 | struct pci_dev *pdev = h->pdev; |
---|
2878 | | - int i, ret; |
---|
| 3787 | + int i; |
---|
2879 | 3788 | |
---|
2880 | | - priv->ring_data = devm_kzalloc(&pdev->dev, |
---|
2881 | | - array3_size(h->kinfo.num_tqps, |
---|
2882 | | - sizeof(*priv->ring_data), |
---|
2883 | | - 2), |
---|
2884 | | - GFP_KERNEL); |
---|
2885 | | - if (!priv->ring_data) |
---|
| 3789 | + priv->ring = devm_kzalloc(&pdev->dev, |
---|
| 3790 | + array3_size(h->kinfo.num_tqps, |
---|
| 3791 | + sizeof(*priv->ring), 2), |
---|
| 3792 | + GFP_KERNEL); |
---|
| 3793 | + if (!priv->ring) |
---|
2886 | 3794 | return -ENOMEM; |
---|
2887 | 3795 | |
---|
2888 | | - for (i = 0; i < h->kinfo.num_tqps; i++) { |
---|
2889 | | - ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); |
---|
2890 | | - if (ret) |
---|
2891 | | - goto err; |
---|
2892 | | - } |
---|
| 3796 | + for (i = 0; i < h->kinfo.num_tqps; i++) |
---|
| 3797 | + hns3_queue_to_ring(h->kinfo.tqp[i], priv); |
---|
2893 | 3798 | |
---|
2894 | 3799 | return 0; |
---|
2895 | | -err: |
---|
2896 | | - while (i--) { |
---|
2897 | | - devm_kfree(priv->dev, priv->ring_data[i].ring); |
---|
2898 | | - devm_kfree(priv->dev, |
---|
2899 | | - priv->ring_data[i + h->kinfo.num_tqps].ring); |
---|
2900 | | - } |
---|
2901 | | - |
---|
2902 | | - devm_kfree(&pdev->dev, priv->ring_data); |
---|
2903 | | - return ret; |
---|
2904 | 3800 | } |
---|
2905 | 3801 | |
---|
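This hunk replaces the old ring_data[] indirection (one devm_kzalloc() per ring, each reachable only through ring_data[i].ring) with a single flat array sized via array3_size(num_tqps, sizeof(*ring), 2): TX rings occupy the first half, RX rings the second. A sketch of the indexing convention, with hypothetical demo_* names:

```c
struct demo_ring { int id; };

struct demo_priv {
	struct demo_ring *ring;	/* 2 * num_tqps entries, TX first */
	int num_tqps;
};

/* TX ring for queue q lives in [0, num_tqps). */
static inline struct demo_ring *demo_tx_ring(struct demo_priv *p, int q)
{
	return &p->ring[q];
}

/* RX ring for queue q lives in [num_tqps, 2 * num_tqps). */
static inline struct demo_ring *demo_rx_ring(struct demo_priv *p, int q)
{
	return &p->ring[q + p->num_tqps];
}
```

One allocation also means the release path below collapses to a single devm_kfree() plus a NULL guard, instead of a loop over per-ring frees.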
2906 | 3802 | static void hns3_put_ring_config(struct hns3_nic_priv *priv) |
---|
2907 | 3803 | { |
---|
2908 | | - struct hnae3_handle *h = priv->ae_handle; |
---|
2909 | | - int i; |
---|
| 3804 | + if (!priv->ring) |
---|
| 3805 | + return; |
---|
2910 | 3806 | |
---|
2911 | | - for (i = 0; i < h->kinfo.num_tqps; i++) { |
---|
2912 | | - devm_kfree(priv->dev, priv->ring_data[i].ring); |
---|
2913 | | - devm_kfree(priv->dev, |
---|
2914 | | - priv->ring_data[i + h->kinfo.num_tqps].ring); |
---|
2915 | | - } |
---|
2916 | | - devm_kfree(priv->dev, priv->ring_data); |
---|
| 3807 | + devm_kfree(priv->dev, priv->ring); |
---|
| 3808 | + priv->ring = NULL; |
---|
2917 | 3809 | } |
---|
2918 | 3810 | |
---|
2919 | 3811 | static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) |
---|
.. | .. |
---|
2923 | 3815 | if (ring->desc_num <= 0 || ring->buf_size <= 0) |
---|
2924 | 3816 | return -EINVAL; |
---|
2925 | 3817 | |
---|
2926 | | - ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), |
---|
2927 | | - GFP_KERNEL); |
---|
| 3818 | + ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, |
---|
| 3819 | + sizeof(ring->desc_cb[0]), GFP_KERNEL); |
---|
2928 | 3820 | if (!ring->desc_cb) { |
---|
2929 | 3821 | ret = -ENOMEM; |
---|
2930 | 3822 | goto out; |
---|
.. | .. |
---|
2945 | 3837 | out_with_desc: |
---|
2946 | 3838 | hns3_free_desc(ring); |
---|
2947 | 3839 | out_with_desc_cb: |
---|
2948 | | - kfree(ring->desc_cb); |
---|
| 3840 | + devm_kfree(ring_to_dev(ring), ring->desc_cb); |
---|
2949 | 3841 | ring->desc_cb = NULL; |
---|
2950 | 3842 | out: |
---|
2951 | 3843 | return ret; |
---|
2952 | 3844 | } |
---|
2953 | 3845 | |
---|
2954 | | -static void hns3_fini_ring(struct hns3_enet_ring *ring) |
---|
| 3846 | +void hns3_fini_ring(struct hns3_enet_ring *ring) |
---|
2955 | 3847 | { |
---|
2956 | 3848 | hns3_free_desc(ring); |
---|
2957 | | - kfree(ring->desc_cb); |
---|
| 3849 | + devm_kfree(ring_to_dev(ring), ring->desc_cb); |
---|
2958 | 3850 | ring->desc_cb = NULL; |
---|
2959 | 3851 | ring->next_to_clean = 0; |
---|
2960 | 3852 | ring->next_to_use = 0; |
---|
| 3853 | + ring->last_to_use = 0; |
---|
| 3854 | + ring->pending_buf = 0; |
---|
| 3855 | + if (ring->skb) { |
---|
| 3856 | + dev_kfree_skb_any(ring->skb); |
---|
| 3857 | + ring->skb = NULL; |
---|
| 3858 | + } |
---|
2961 | 3859 | } |
---|
2962 | 3860 | |
---|
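The desc_cb switch from kcalloc()/kfree() to devm_kcalloc()/devm_kfree() ties the allocation to the underlying device: it is released automatically if the driver is ever unbound with the memory still live, while the explicit devm_kfree() in hns3_fini_ring() keeps the early-release path. A minimal sketch, where the demo_* names are assumptions:

```c
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_cb { dma_addr_t dma; };

/* Device-managed allocation: freed on unbind, or earlier via devm_kfree(). */
static struct demo_cb *demo_alloc_cbs(struct device *dev, int num)
{
	return devm_kcalloc(dev, num, sizeof(struct demo_cb), GFP_KERNEL);
}

static void demo_free_cbs(struct device *dev, struct demo_cb *cbs)
{
	devm_kfree(dev, cbs);
}
```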
2963 | 3861 | static int hns3_buf_size2type(u32 buf_size) |
---|
.. | .. |
---|
2990 | 3888 | struct hnae3_queue *q = ring->tqp; |
---|
2991 | 3889 | |
---|
2992 | 3890 | if (!HNAE3_IS_TX_RING(ring)) { |
---|
2993 | | - hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, |
---|
2994 | | - (u32)dma); |
---|
| 3891 | + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma); |
---|
2995 | 3892 | hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, |
---|
2996 | 3893 | (u32)((dma >> 31) >> 1)); |
---|
2997 | 3894 | |
---|
.. | .. |
---|
3026 | 3923 | for (j = 0; j < tc_info->tqp_count; j++) { |
---|
3027 | 3924 | struct hnae3_queue *q; |
---|
3028 | 3925 | |
---|
3029 | | - q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp; |
---|
| 3926 | + q = priv->ring[tc_info->tqp_offset + j].tqp; |
---|
3030 | 3927 | hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, |
---|
3031 | 3928 | tc_info->tc); |
---|
3032 | 3929 | } |
---|
.. | .. |
---|
3041 | 3938 | int ret; |
---|
3042 | 3939 | |
---|
3043 | 3940 | for (i = 0; i < ring_num; i++) { |
---|
3044 | | - ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); |
---|
| 3941 | + ret = hns3_alloc_ring_memory(&priv->ring[i]); |
---|
3045 | 3942 | if (ret) { |
---|
3046 | 3943 | dev_err(priv->dev, |
---|
3047 | 3944 | "Alloc ring memory fail! ret=%d\n", ret); |
---|
3048 | 3945 | goto out_when_alloc_ring_memory; |
---|
3049 | 3946 | } |
---|
3050 | 3947 | |
---|
3051 | | - u64_stats_init(&priv->ring_data[i].ring->syncp); |
---|
| 3948 | + u64_stats_init(&priv->ring[i].syncp); |
---|
3052 | 3949 | } |
---|
3053 | 3950 | |
---|
3054 | 3951 | return 0; |
---|
3055 | 3952 | |
---|
3056 | 3953 | out_when_alloc_ring_memory: |
---|
3057 | 3954 | for (j = i - 1; j >= 0; j--) |
---|
3058 | | - hns3_fini_ring(priv->ring_data[j].ring); |
---|
| 3955 | + hns3_fini_ring(&priv->ring[j]); |
---|
3059 | 3956 | |
---|
3060 | 3957 | return -ENOMEM; |
---|
3061 | 3958 | } |
---|
.. | .. |
---|
3066 | 3963 | int i; |
---|
3067 | 3964 | |
---|
3068 | 3965 | for (i = 0; i < h->kinfo.num_tqps; i++) { |
---|
3069 | | - if (h->ae_algo->ops->reset_queue) |
---|
3070 | | - h->ae_algo->ops->reset_queue(h, i); |
---|
3071 | | - |
---|
3072 | | - hns3_fini_ring(priv->ring_data[i].ring); |
---|
3073 | | - hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); |
---|
| 3966 | + hns3_fini_ring(&priv->ring[i]); |
---|
| 3967 | + hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); |
---|
3074 | 3968 | } |
---|
3075 | 3969 | return 0; |
---|
3076 | 3970 | } |
---|
3077 | 3971 | |
---|
3078 | 3972 | /* Set the MAC address if it is configured, or leave it to the AE driver */
---|
3079 | | -static void hns3_init_mac_addr(struct net_device *netdev, bool init) |
---|
| 3973 | +static int hns3_init_mac_addr(struct net_device *netdev) |
---|
3080 | 3974 | { |
---|
3081 | 3975 | struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
3082 | 3976 | struct hnae3_handle *h = priv->ae_handle; |
---|
3083 | 3977 | u8 mac_addr_temp[ETH_ALEN]; |
---|
| 3978 | + int ret = 0; |
---|
3084 | 3979 | |
---|
3085 | | - if (h->ae_algo->ops->get_mac_addr && init) { |
---|
| 3980 | + if (h->ae_algo->ops->get_mac_addr) |
---|
3086 | 3981 | h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); |
---|
3087 | | - ether_addr_copy(netdev->dev_addr, mac_addr_temp); |
---|
3088 | | - } |
---|
3089 | 3982 | |
---|
3090 | 3983 | /* Check if the MAC address is valid, if not get a random one */ |
---|
3091 | | - if (!is_valid_ether_addr(netdev->dev_addr)) { |
---|
| 3984 | + if (!is_valid_ether_addr(mac_addr_temp)) { |
---|
3092 | 3985 | eth_hw_addr_random(netdev); |
---|
3093 | 3986 | dev_warn(priv->dev, "using random MAC address %pM\n", |
---|
3094 | 3987 | netdev->dev_addr); |
---|
| 3988 | + } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { |
---|
| 3989 | + ether_addr_copy(netdev->dev_addr, mac_addr_temp); |
---|
| 3990 | + ether_addr_copy(netdev->perm_addr, mac_addr_temp); |
---|
| 3991 | + } else { |
---|
| 3992 | + return 0; |
---|
3095 | 3993 | } |
---|
3096 | 3994 | |
---|
3097 | 3995 | if (h->ae_algo->ops->set_mac_addr) |
---|
3098 | | - h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); |
---|
| 3996 | + ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); |
---|
3099 | 3997 | |
---|
| 3998 | + return ret; |
---|
3100 | 3999 | } |
---|
3101 | 4000 | |
---|
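hns3_init_mac_addr() now returns a status and skips the hardware write when nothing changed: read the address from firmware, fall back to a random locally administered one if it is invalid, and only then push the result down. A sketch under the same assumptions; the demo_hw_* ops are hypothetical, and note this kernel generation still writes netdev->dev_addr directly, where newer kernels require eth_hw_addr_set():

```c
#include <linux/etherdevice.h>

struct demo_hw;
void demo_hw_get_mac(struct demo_hw *hw, u8 *addr);
int demo_hw_set_mac(struct demo_hw *hw, const u8 *addr);

static int demo_init_mac(struct net_device *ndev, struct demo_hw *hw)
{
	u8 addr[ETH_ALEN];

	demo_hw_get_mac(hw, addr);

	if (!is_valid_ether_addr(addr)) {
		/* Valid, locally administered, random address. */
		eth_hw_addr_random(ndev);
		netdev_warn(ndev, "using random MAC address %pM\n",
			    ndev->dev_addr);
	} else if (!ether_addr_equal(ndev->dev_addr, addr)) {
		ether_addr_copy(ndev->dev_addr, addr);
		ether_addr_copy(ndev->perm_addr, addr);
	} else {
		return 0;	/* unchanged: skip the hardware write */
	}

	return demo_hw_set_mac(hw, ndev->dev_addr);
}
```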
3102 | | -static void hns3_uninit_mac_addr(struct net_device *netdev) |
---|
| 4001 | +static int hns3_init_phy(struct net_device *netdev) |
---|
3103 | 4002 | { |
---|
3104 | | - struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
3105 | | - struct hnae3_handle *h = priv->ae_handle; |
---|
| 4003 | + struct hnae3_handle *h = hns3_get_handle(netdev); |
---|
| 4004 | + int ret = 0; |
---|
3106 | 4005 | |
---|
3107 | | - if (h->ae_algo->ops->rm_uc_addr) |
---|
3108 | | - h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr); |
---|
| 4006 | + if (h->ae_algo->ops->mac_connect_phy) |
---|
| 4007 | + ret = h->ae_algo->ops->mac_connect_phy(h); |
---|
| 4008 | + |
---|
| 4009 | + return ret; |
---|
3109 | 4010 | } |
---|
3110 | 4011 | |
---|
3111 | | -static void hns3_nic_set_priv_ops(struct net_device *netdev) |
---|
| 4012 | +static void hns3_uninit_phy(struct net_device *netdev) |
---|
3112 | 4013 | { |
---|
3113 | | - struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
| 4014 | + struct hnae3_handle *h = hns3_get_handle(netdev); |
---|
3114 | 4015 | |
---|
3115 | | - if ((netdev->features & NETIF_F_TSO) || |
---|
3116 | | - (netdev->features & NETIF_F_TSO6)) { |
---|
3117 | | - priv->ops.fill_desc = hns3_fill_desc_tso; |
---|
3118 | | - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; |
---|
3119 | | - } else { |
---|
3120 | | - priv->ops.fill_desc = hns3_fill_desc; |
---|
3121 | | - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; |
---|
3122 | | - } |
---|
| 4016 | + if (h->ae_algo->ops->mac_disconnect_phy) |
---|
| 4017 | + h->ae_algo->ops->mac_disconnect_phy(h); |
---|
| 4018 | +} |
---|
| 4019 | + |
---|
| 4020 | +static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) |
---|
| 4021 | +{ |
---|
| 4022 | + struct hnae3_handle *h = hns3_get_handle(netdev); |
---|
| 4023 | + |
---|
| 4024 | + if (h->ae_algo->ops->del_all_fd_entries) |
---|
| 4025 | + h->ae_algo->ops->del_all_fd_entries(h, clear_list); |
---|
| 4026 | +} |
---|
| 4027 | + |
---|
| 4028 | +static int hns3_client_start(struct hnae3_handle *handle) |
---|
| 4029 | +{ |
---|
| 4030 | + if (!handle->ae_algo->ops->client_start) |
---|
| 4031 | + return 0; |
---|
| 4032 | + |
---|
| 4033 | + return handle->ae_algo->ops->client_start(handle); |
---|
| 4034 | +} |
---|
| 4035 | + |
---|
| 4036 | +static void hns3_client_stop(struct hnae3_handle *handle) |
---|
| 4037 | +{ |
---|
| 4038 | + if (!handle->ae_algo->ops->client_stop) |
---|
| 4039 | + return; |
---|
| 4040 | + |
---|
| 4041 | + handle->ae_algo->ops->client_stop(handle); |
---|
| 4042 | +} |
---|
| 4043 | + |
---|
| 4044 | +static void hns3_info_show(struct hns3_nic_priv *priv) |
---|
| 4045 | +{ |
---|
| 4046 | + struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; |
---|
| 4047 | + |
---|
| 4048 | + dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); |
---|
| 4049 | + dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); |
---|
| 4050 | + dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); |
---|
| 4051 | + dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); |
---|
| 4052 | + dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); |
---|
| 4053 | + dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); |
---|
| 4054 | + dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); |
---|
| 4055 | + dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); |
---|
| 4056 | + dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); |
---|
3123 | 4057 | } |
---|
3124 | 4058 | |
---|
3125 | 4059 | static int hns3_client_init(struct hnae3_handle *handle) |
---|
3126 | 4060 | { |
---|
3127 | 4061 | struct pci_dev *pdev = handle->pdev; |
---|
| 4062 | + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
---|
| 4063 | + u16 alloc_tqps, max_rss_size; |
---|
3128 | 4064 | struct hns3_nic_priv *priv; |
---|
3129 | 4065 | struct net_device *netdev; |
---|
3130 | 4066 | int ret; |
---|
3131 | 4067 | |
---|
3132 | | - netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), |
---|
3133 | | - hns3_get_max_available_channels(handle)); |
---|
| 4068 | + handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, |
---|
| 4069 | + &max_rss_size); |
---|
| 4070 | + netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); |
---|
3134 | 4071 | if (!netdev) |
---|
3135 | 4072 | return -ENOMEM; |
---|
3136 | 4073 | |
---|
.. | .. |
---|
3138 | 4075 | priv->dev = &pdev->dev; |
---|
3139 | 4076 | priv->netdev = netdev; |
---|
3140 | 4077 | priv->ae_handle = handle; |
---|
3141 | | - priv->ae_handle->last_reset_time = jiffies; |
---|
3142 | 4078 | priv->tx_timeout_count = 0; |
---|
| 4079 | + priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; |
---|
| 4080 | + set_bit(HNS3_NIC_STATE_DOWN, &priv->state); |
---|
| 4081 | + |
---|
| 4082 | + handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); |
---|
3143 | 4083 | |
---|
3144 | 4084 | handle->kinfo.netdev = netdev; |
---|
3145 | 4085 | handle->priv = (void *)priv; |
---|
3146 | 4086 | |
---|
3147 | | - hns3_init_mac_addr(netdev, true); |
---|
| 4087 | + hns3_init_mac_addr(netdev); |
---|
3148 | 4088 | |
---|
3149 | 4089 | hns3_set_default_feature(netdev); |
---|
3150 | 4090 | |
---|
.. | .. |
---|
3153 | 4093 | netdev->netdev_ops = &hns3_nic_netdev_ops; |
---|
3154 | 4094 | SET_NETDEV_DEV(netdev, &pdev->dev); |
---|
3155 | 4095 | hns3_ethtool_set_ops(netdev); |
---|
3156 | | - hns3_nic_set_priv_ops(netdev); |
---|
3157 | 4096 | |
---|
3158 | 4097 | /* Carrier off reporting is important to ethtool even BEFORE open */ |
---|
3159 | 4098 | netif_carrier_off(netdev); |
---|
3160 | | - |
---|
3161 | | - if (handle->flags & HNAE3_SUPPORT_VF) |
---|
3162 | | - handle->reset_level = HNAE3_VF_RESET; |
---|
3163 | | - else |
---|
3164 | | - handle->reset_level = HNAE3_FUNC_RESET; |
---|
3165 | 4099 | |
---|
3166 | 4100 | ret = hns3_get_ring_config(priv); |
---|
3167 | 4101 | if (ret) { |
---|
.. | .. |
---|
3184 | 4118 | ret = hns3_init_all_ring(priv); |
---|
3185 | 4119 | if (ret) { |
---|
3186 | 4120 | ret = -ENOMEM; |
---|
3187 | | - goto out_init_ring_data; |
---|
| 4121 | + goto out_init_ring; |
---|
3188 | 4122 | } |
---|
| 4123 | + |
---|
| 4124 | + ret = hns3_init_phy(netdev); |
---|
| 4125 | + if (ret) |
---|
| 4126 | + goto out_init_phy; |
---|
| 4127 | + |
---|
| 4128 | + /* the device can work without cpu rmap; only aRFS needs it */
---|
| 4129 | + ret = hns3_set_rx_cpu_rmap(netdev); |
---|
| 4130 | + if (ret) |
---|
| 4131 | + dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); |
---|
| 4132 | + |
---|
| 4133 | + ret = hns3_nic_init_irq(priv); |
---|
| 4134 | + if (ret) { |
---|
| 4135 | + dev_err(priv->dev, "init irq failed! ret=%d\n", ret); |
---|
| 4136 | + hns3_free_rx_cpu_rmap(netdev); |
---|
| 4137 | + goto out_init_irq_fail; |
---|
| 4138 | + } |
---|
| 4139 | + |
---|
| 4140 | + ret = hns3_client_start(handle); |
---|
| 4141 | + if (ret) { |
---|
| 4142 | + dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); |
---|
| 4143 | + goto out_client_start; |
---|
| 4144 | + } |
---|
| 4145 | + |
---|
| 4146 | + hns3_dcbnl_setup(handle); |
---|
| 4147 | + |
---|
| 4148 | + hns3_dbg_init(handle); |
---|
| 4149 | + |
---|
| 4150 | + /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */ |
---|
| 4151 | + netdev->max_mtu = HNS3_MAX_MTU; |
---|
| 4152 | + |
---|
| 4153 | + set_bit(HNS3_NIC_STATE_INITED, &priv->state); |
---|
3189 | 4154 | |
---|
3190 | 4155 | ret = register_netdev(netdev); |
---|
3191 | 4156 | if (ret) { |
---|
.. | .. |
---|
3193 | 4158 | goto out_reg_netdev_fail; |
---|
3194 | 4159 | } |
---|
3195 | 4160 | |
---|
3196 | | - hns3_dcbnl_setup(handle); |
---|
3197 | | - |
---|
3198 | | - /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ |
---|
3199 | | - netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); |
---|
| 4161 | + if (netif_msg_drv(handle)) |
---|
| 4162 | + hns3_info_show(priv); |
---|
3200 | 4163 | |
---|
3201 | 4164 | return ret; |
---|
3202 | 4165 | |
---|
3203 | 4166 | out_reg_netdev_fail: |
---|
3204 | | -out_init_ring_data: |
---|
3205 | | - (void)hns3_nic_uninit_vector_data(priv); |
---|
| 4167 | + hns3_dbg_uninit(handle); |
---|
| 4168 | +out_client_start: |
---|
| 4169 | + hns3_free_rx_cpu_rmap(netdev); |
---|
| 4170 | + hns3_nic_uninit_irq(priv); |
---|
| 4171 | +out_init_irq_fail: |
---|
| 4172 | + hns3_uninit_phy(netdev); |
---|
| 4173 | +out_init_phy: |
---|
| 4174 | + hns3_uninit_all_ring(priv); |
---|
| 4175 | +out_init_ring: |
---|
| 4176 | + hns3_nic_uninit_vector_data(priv); |
---|
3206 | 4177 | out_init_vector_data: |
---|
3207 | 4178 | hns3_nic_dealloc_vector_data(priv); |
---|
3208 | 4179 | out_alloc_vector_data: |
---|
3209 | | - priv->ring_data = NULL; |
---|
| 4180 | + priv->ring = NULL; |
---|
3210 | 4181 | out_get_ring_cfg: |
---|
3211 | 4182 | priv->ae_handle = NULL; |
---|
3212 | 4183 | free_netdev(netdev); |
---|
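The extended error path in hns3_client_init() follows the standard probe unwind idiom: each later failure jumps to a label that releases everything acquired before it, in reverse order, so nothing is freed twice and nothing is leaked. The shape, reduced to a sketch with illustrative names:

```c
struct demo_priv;

int demo_alloc_rings(struct demo_priv *priv);
int demo_request_irqs(struct demo_priv *priv);
int demo_start(struct demo_priv *priv);
void demo_free_irqs(struct demo_priv *priv);
void demo_free_rings(struct demo_priv *priv);

static int demo_init(struct demo_priv *priv)
{
	int ret;

	ret = demo_alloc_rings(priv);
	if (ret)
		return ret;

	ret = demo_request_irqs(priv);
	if (ret)
		goto err_free_rings;

	ret = demo_start(priv);
	if (ret)
		goto err_free_irqs;

	return 0;

err_free_irqs:
	demo_free_irqs(priv);	/* undo in reverse order of acquisition */
err_free_rings:
	demo_free_rings(priv);
	return ret;
}
```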
.. | .. |
---|
3222 | 4193 | if (netdev->reg_state != NETREG_UNINITIALIZED) |
---|
3223 | 4194 | unregister_netdev(netdev); |
---|
3224 | 4195 | |
---|
3225 | | - hns3_force_clear_all_rx_ring(handle); |
---|
| 4196 | + hns3_client_stop(handle); |
---|
3226 | 4197 | |
---|
3227 | | - ret = hns3_nic_uninit_vector_data(priv); |
---|
3228 | | - if (ret) |
---|
3229 | | - netdev_err(netdev, "uninit vector error\n"); |
---|
| 4198 | + hns3_uninit_phy(netdev); |
---|
3230 | 4199 | |
---|
3231 | | - ret = hns3_nic_dealloc_vector_data(priv); |
---|
3232 | | - if (ret) |
---|
3233 | | - netdev_err(netdev, "dealloc vector error\n"); |
---|
| 4200 | + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { |
---|
| 4201 | + netdev_warn(netdev, "already uninitialized\n"); |
---|
| 4202 | + goto out_netdev_free; |
---|
| 4203 | + } |
---|
| 4204 | + |
---|
| 4205 | + hns3_free_rx_cpu_rmap(netdev); |
---|
| 4206 | + |
---|
| 4207 | + hns3_nic_uninit_irq(priv); |
---|
| 4208 | + |
---|
| 4209 | + hns3_del_all_fd_rules(netdev, true); |
---|
| 4210 | + |
---|
| 4211 | + hns3_clear_all_ring(handle, true); |
---|
| 4212 | + |
---|
| 4213 | + hns3_nic_uninit_vector_data(priv); |
---|
| 4214 | + |
---|
| 4215 | + hns3_nic_dealloc_vector_data(priv); |
---|
3234 | 4216 | |
---|
3235 | 4217 | ret = hns3_uninit_all_ring(priv); |
---|
3236 | 4218 | if (ret) |
---|
.. | .. |
---|
3238 | 4220 | |
---|
3239 | 4221 | hns3_put_ring_config(priv); |
---|
3240 | 4222 | |
---|
3241 | | - priv->ring_data = NULL; |
---|
3242 | | - |
---|
3243 | | - hns3_uninit_mac_addr(netdev); |
---|
3244 | | - |
---|
| 4223 | +out_netdev_free: |
---|
| 4224 | + hns3_dbg_uninit(handle); |
---|
3245 | 4225 | free_netdev(netdev); |
---|
3246 | 4226 | } |
---|
3247 | 4227 | |
---|
.. | .. |
---|
3253 | 4233 | return; |
---|
3254 | 4234 | |
---|
3255 | 4235 | if (linkup) { |
---|
3256 | | - netif_carrier_on(netdev); |
---|
3257 | 4236 | netif_tx_wake_all_queues(netdev); |
---|
3258 | | - netdev_info(netdev, "link up\n"); |
---|
| 4237 | + netif_carrier_on(netdev); |
---|
| 4238 | + if (netif_msg_link(handle)) |
---|
| 4239 | + netdev_info(netdev, "link up\n"); |
---|
3259 | 4240 | } else { |
---|
3260 | 4241 | netif_carrier_off(netdev); |
---|
3261 | 4242 | netif_tx_stop_all_queues(netdev); |
---|
3262 | | - netdev_info(netdev, "link down\n"); |
---|
| 4243 | + if (netif_msg_link(handle)) |
---|
| 4244 | + netdev_info(netdev, "link down\n"); |
---|
3263 | 4245 | } |
---|
3264 | 4246 | } |
---|
3265 | 4247 | |
---|
.. | .. |
---|
3267 | 4249 | { |
---|
3268 | 4250 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; |
---|
3269 | 4251 | struct net_device *ndev = kinfo->netdev; |
---|
3270 | | - bool if_running; |
---|
3271 | | - int ret; |
---|
3272 | 4252 | |
---|
3273 | 4253 | if (tc > HNAE3_MAX_TC) |
---|
3274 | 4254 | return -EINVAL; |
---|
.. | .. |
---|
3276 | 4256 | if (!ndev) |
---|
3277 | 4257 | return -ENODEV; |
---|
3278 | 4258 | |
---|
3279 | | - if_running = netif_running(ndev); |
---|
3280 | | - |
---|
3281 | | - if (if_running) { |
---|
3282 | | - (void)hns3_nic_net_stop(ndev); |
---|
3283 | | - msleep(100); |
---|
3284 | | - } |
---|
3285 | | - |
---|
3286 | | - ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ? |
---|
3287 | | - kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP; |
---|
3288 | | - if (ret) |
---|
3289 | | - goto err_out; |
---|
3290 | | - |
---|
3291 | | - ret = hns3_nic_set_real_num_queue(ndev); |
---|
3292 | | - |
---|
3293 | | -err_out: |
---|
3294 | | - if (if_running) |
---|
3295 | | - (void)hns3_nic_net_open(ndev); |
---|
3296 | | - |
---|
3297 | | - return ret; |
---|
3298 | | -} |
---|
3299 | | - |
---|
3300 | | -static void hns3_recover_hw_addr(struct net_device *ndev) |
---|
3301 | | -{ |
---|
3302 | | - struct netdev_hw_addr_list *list; |
---|
3303 | | - struct netdev_hw_addr *ha, *tmp; |
---|
3304 | | - |
---|
3305 | | - /* go through and sync uc_addr entries to the device */ |
---|
3306 | | - list = &ndev->uc; |
---|
3307 | | - list_for_each_entry_safe(ha, tmp, &list->list, list) |
---|
3308 | | - hns3_nic_uc_sync(ndev, ha->addr); |
---|
3309 | | - |
---|
3310 | | - /* go through and sync mc_addr entries to the device */ |
---|
3311 | | - list = &ndev->mc; |
---|
3312 | | - list_for_each_entry_safe(ha, tmp, &list->list, list) |
---|
3313 | | - hns3_nic_mc_sync(ndev, ha->addr); |
---|
| 4259 | + return hns3_nic_set_real_num_queue(ndev); |
---|
3314 | 4260 | } |
---|
3315 | 4261 | |
---|
3316 | 4262 | static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) |
---|
3317 | 4263 | { |
---|
3318 | 4264 | while (ring->next_to_clean != ring->next_to_use) { |
---|
3319 | 4265 | ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; |
---|
3320 | | - hns3_free_buffer_detach(ring, ring->next_to_clean); |
---|
| 4266 | + hns3_free_buffer_detach(ring, ring->next_to_clean, 0); |
---|
3321 | 4267 | ring_ptr_move_fw(ring, next_to_clean); |
---|
3322 | 4268 | } |
---|
| 4269 | + |
---|
| 4270 | + ring->pending_buf = 0; |
---|
3323 | 4271 | } |
---|
3324 | 4272 | |
---|
3325 | 4273 | static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) |
---|
.. | .. |
---|
3333 | 4281 | * stack, so we need to replace the buffer here. |
---|
3334 | 4282 | */ |
---|
3335 | 4283 | if (!ring->desc_cb[ring->next_to_use].reuse_flag) { |
---|
3336 | | - ret = hns3_reserve_buffer_map(ring, &res_cbs); |
---|
| 4284 | + ret = hns3_alloc_and_map_buffer(ring, &res_cbs); |
---|
3337 | 4285 | if (ret) { |
---|
3338 | 4286 | u64_stats_update_begin(&ring->syncp); |
---|
3339 | 4287 | ring->stats.sw_err_cnt++; |
---|
.. | .. |
---|
3341 | 4289 | /* if allocating a new buffer fails, exit directly
---|
3342 | 4290 | * and reclear it in the up flow.
---|
3343 | 4291 | */ |
---|
3344 | | - netdev_warn(ring->tqp->handle->kinfo.netdev, |
---|
| 4292 | + netdev_warn(ring_to_netdev(ring), |
---|
3345 | 4293 | "reserve buffer map failed, ret = %d\n", |
---|
3346 | 4294 | ret); |
---|
3347 | 4295 | return ret; |
---|
3348 | 4296 | } |
---|
3349 | | - hns3_replace_buffer(ring, ring->next_to_use, |
---|
3350 | | - &res_cbs); |
---|
| 4297 | + hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); |
---|
3351 | 4298 | } |
---|
3352 | 4299 | ring_ptr_move_fw(ring, next_to_use); |
---|
| 4300 | + } |
---|
| 4301 | + |
---|
| 4302 | + /* Free the pending skb in rx ring */ |
---|
| 4303 | + if (ring->skb) { |
---|
| 4304 | + dev_kfree_skb_any(ring->skb); |
---|
| 4305 | + ring->skb = NULL; |
---|
| 4306 | + ring->pending_buf = 0; |
---|
3353 | 4307 | } |
---|
3354 | 4308 | |
---|
3355 | 4309 | return 0; |
---|
.. | .. |
---|
3372 | 4326 | } |
---|
3373 | 4327 | } |
---|
3374 | 4328 | |
---|
3375 | | -static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h) |
---|
3376 | | -{ |
---|
3377 | | - struct net_device *ndev = h->kinfo.netdev; |
---|
3378 | | - struct hns3_nic_priv *priv = netdev_priv(ndev); |
---|
3379 | | - struct hns3_enet_ring *ring; |
---|
3380 | | - u32 i; |
---|
3381 | | - |
---|
3382 | | - for (i = 0; i < h->kinfo.num_tqps; i++) { |
---|
3383 | | - ring = priv->ring_data[i + h->kinfo.num_tqps].ring; |
---|
3384 | | - hns3_force_clear_rx_ring(ring); |
---|
3385 | | - } |
---|
3386 | | -} |
---|
3387 | | - |
---|
3388 | | -static void hns3_clear_all_ring(struct hnae3_handle *h) |
---|
| 4329 | +static void hns3_clear_all_ring(struct hnae3_handle *h, bool force) |
---|
3389 | 4330 | { |
---|
3390 | 4331 | struct net_device *ndev = h->kinfo.netdev; |
---|
3391 | 4332 | struct hns3_nic_priv *priv = netdev_priv(ndev); |
---|
3392 | 4333 | u32 i; |
---|
3393 | 4334 | |
---|
3394 | 4335 | for (i = 0; i < h->kinfo.num_tqps; i++) { |
---|
3395 | | - struct netdev_queue *dev_queue; |
---|
3396 | 4336 | struct hns3_enet_ring *ring; |
---|
3397 | 4337 | |
---|
3398 | | - ring = priv->ring_data[i].ring; |
---|
| 4338 | + ring = &priv->ring[i]; |
---|
3399 | 4339 | hns3_clear_tx_ring(ring); |
---|
3400 | | - dev_queue = netdev_get_tx_queue(ndev, |
---|
3401 | | - priv->ring_data[i].queue_index); |
---|
3402 | | - netdev_tx_reset_queue(dev_queue); |
---|
3403 | 4340 | |
---|
3404 | | - ring = priv->ring_data[i + h->kinfo.num_tqps].ring; |
---|
| 4341 | + ring = &priv->ring[i + h->kinfo.num_tqps]; |
---|
3405 | 4342 | /* Continue to clear other rings even if clearing some |
---|
3406 | 4343 | * rings failed. |
---|
3407 | 4344 | */ |
---|
3408 | | - hns3_clear_rx_ring(ring); |
---|
| 4345 | + if (force) |
---|
| 4346 | + hns3_force_clear_rx_ring(ring); |
---|
| 4347 | + else |
---|
| 4348 | + hns3_clear_rx_ring(ring); |
---|
3409 | 4349 | } |
---|
3410 | 4350 | } |
---|
3411 | 4351 | |
---|
.. | .. |
---|
3418 | 4358 | int ret; |
---|
3419 | 4359 | |
---|
3420 | 4360 | for (i = 0; i < h->kinfo.num_tqps; i++) { |
---|
3421 | | - h->ae_algo->ops->reset_queue(h, i); |
---|
3422 | | - hns3_init_ring_hw(priv->ring_data[i].ring); |
---|
| 4361 | + ret = h->ae_algo->ops->reset_queue(h, i); |
---|
| 4362 | + if (ret) |
---|
| 4363 | + return ret; |
---|
| 4364 | + |
---|
| 4365 | + hns3_init_ring_hw(&priv->ring[i]); |
---|
3423 | 4366 | |
---|
3424 | 4367 | /* We need to clear the tx ring here because the self test will
---|
3425 | 4368 | * use the ring and does not go through a down/up cycle first
---|
3426 | 4369 | */ |
---|
3427 | | - hns3_clear_tx_ring(priv->ring_data[i].ring); |
---|
3428 | | - priv->ring_data[i].ring->next_to_clean = 0; |
---|
3429 | | - priv->ring_data[i].ring->next_to_use = 0; |
---|
| 4370 | + hns3_clear_tx_ring(&priv->ring[i]); |
---|
| 4371 | + priv->ring[i].next_to_clean = 0; |
---|
| 4372 | + priv->ring[i].next_to_use = 0; |
---|
| 4373 | + priv->ring[i].last_to_use = 0; |
---|
3430 | 4374 | |
---|
3431 | | - rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring; |
---|
| 4375 | + rx_ring = &priv->ring[i + h->kinfo.num_tqps]; |
---|
3432 | 4376 | hns3_init_ring_hw(rx_ring); |
---|
3433 | 4377 | ret = hns3_clear_rx_ring(rx_ring); |
---|
3434 | 4378 | if (ret) |
---|
.. | .. |
---|
3452 | 4396 | static void hns3_store_coal(struct hns3_nic_priv *priv) |
---|
3453 | 4397 | { |
---|
3454 | 4398 | /* ethtool only supports setting and querying one coal
---|
3455 | | - * configuation for now, so save the vector 0' coal |
---|
3456 | | - * configuation here in order to restore it. |
---|
| 4399 | + * configuration for now, so save vector 0's coal
---|
| 4400 | + * configuration here in order to restore it. |
---|
3457 | 4401 | */ |
---|
3458 | 4402 | memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal, |
---|
3459 | 4403 | sizeof(struct hns3_enet_coalesce)); |
---|
.. | .. |
---|
3478 | 4422 | { |
---|
3479 | 4423 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; |
---|
3480 | 4424 | struct net_device *ndev = kinfo->netdev; |
---|
| 4425 | + struct hns3_nic_priv *priv = netdev_priv(ndev); |
---|
| 4426 | + |
---|
| 4427 | + if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) |
---|
| 4428 | + return 0; |
---|
3481 | 4429 | |
---|
3482 | 4430 | if (!netif_running(ndev)) |
---|
3483 | 4431 | return 0; |
---|
.. | .. |
---|
3488 | 4436 | static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) |
---|
3489 | 4437 | { |
---|
3490 | 4438 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; |
---|
| 4439 | + struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); |
---|
3491 | 4440 | int ret = 0; |
---|
3492 | 4441 | |
---|
| 4442 | + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { |
---|
| 4443 | + netdev_err(kinfo->netdev, "device is not initialized yet\n"); |
---|
| 4444 | + return -EFAULT; |
---|
| 4445 | + } |
---|
| 4446 | + |
---|
| 4447 | + clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); |
---|
| 4448 | + |
---|
3493 | 4449 | if (netif_running(kinfo->netdev)) { |
---|
3494 | | - ret = hns3_nic_net_up(kinfo->netdev); |
---|
| 4450 | + ret = hns3_nic_net_open(kinfo->netdev); |
---|
3495 | 4451 | if (ret) { |
---|
| 4452 | + set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); |
---|
3496 | 4453 | netdev_err(kinfo->netdev, |
---|
3497 | | - "hns net up fail, ret=%d!\n", ret); |
---|
| 4454 | + "net up fail, ret=%d!\n", ret); |
---|
3498 | 4455 | return ret; |
---|
3499 | 4456 | } |
---|
3500 | | - handle->last_reset_time = jiffies; |
---|
3501 | 4457 | } |
---|
3502 | 4458 | |
---|
3503 | 4459 | return ret; |
---|
.. | .. |
---|
3509 | 4465 | struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
3510 | 4466 | int ret; |
---|
3511 | 4467 | |
---|
3512 | | - hns3_init_mac_addr(netdev, false); |
---|
3513 | | - hns3_nic_set_rx_mode(netdev); |
---|
3514 | | - hns3_recover_hw_addr(netdev); |
---|
3515 | | - |
---|
3516 | | - /* Hardware table is only clear when pf resets */ |
---|
3517 | | - if (!(handle->flags & HNAE3_SUPPORT_VF)) |
---|
3518 | | - hns3_restore_vlan(netdev); |
---|
3519 | | - |
---|
3520 | 4468 | /* Carrier off reporting is important to ethtool even BEFORE open */ |
---|
3521 | 4469 | netif_carrier_off(netdev); |
---|
| 4470 | + |
---|
| 4471 | + ret = hns3_get_ring_config(priv); |
---|
| 4472 | + if (ret) |
---|
| 4473 | + return ret; |
---|
| 4474 | + |
---|
| 4475 | + ret = hns3_nic_alloc_vector_data(priv); |
---|
| 4476 | + if (ret) |
---|
| 4477 | + goto err_put_ring; |
---|
3522 | 4478 | |
---|
3523 | 4479 | hns3_restore_coal(priv); |
---|
3524 | 4480 | |
---|
3525 | 4481 | ret = hns3_nic_init_vector_data(priv); |
---|
3526 | 4482 | if (ret) |
---|
3527 | | - return ret; |
---|
| 4483 | + goto err_dealloc_vector; |
---|
3528 | 4484 | |
---|
3529 | 4485 | ret = hns3_init_all_ring(priv); |
---|
| 4486 | + if (ret) |
---|
| 4487 | + goto err_uninit_vector; |
---|
| 4488 | + |
---|
| 4489 | + /* the device can work without cpu rmap; only aRFS needs it */
---|
| 4490 | + ret = hns3_set_rx_cpu_rmap(netdev); |
---|
| 4491 | + if (ret) |
---|
| 4492 | + dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); |
---|
| 4493 | + |
---|
| 4494 | + ret = hns3_nic_init_irq(priv); |
---|
3530 | 4495 | if (ret) { |
---|
3531 | | - hns3_nic_uninit_vector_data(priv); |
---|
3532 | | - priv->ring_data = NULL; |
---|
| 4496 | + dev_err(priv->dev, "init irq failed! ret=%d\n", ret); |
---|
| 4497 | + hns3_free_rx_cpu_rmap(netdev); |
---|
| 4498 | + goto err_init_irq_fail; |
---|
3533 | 4499 | } |
---|
| 4500 | + |
---|
| 4501 | + if (!hns3_is_phys_func(handle->pdev)) |
---|
| 4502 | + hns3_init_mac_addr(netdev); |
---|
| 4503 | + |
---|
| 4504 | + ret = hns3_client_start(handle); |
---|
| 4505 | + if (ret) { |
---|
| 4506 | + dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); |
---|
| 4507 | + goto err_client_start_fail; |
---|
| 4508 | + } |
---|
| 4509 | + |
---|
| 4510 | + set_bit(HNS3_NIC_STATE_INITED, &priv->state); |
---|
| 4511 | + |
---|
| 4512 | + return ret; |
---|
| 4513 | + |
---|
| 4514 | +err_client_start_fail: |
---|
| 4515 | + hns3_free_rx_cpu_rmap(netdev); |
---|
| 4516 | + hns3_nic_uninit_irq(priv); |
---|
| 4517 | +err_init_irq_fail: |
---|
| 4518 | + hns3_uninit_all_ring(priv); |
---|
| 4519 | +err_uninit_vector: |
---|
| 4520 | + hns3_nic_uninit_vector_data(priv); |
---|
| 4521 | +err_dealloc_vector: |
---|
| 4522 | + hns3_nic_dealloc_vector_data(priv); |
---|
| 4523 | +err_put_ring: |
---|
| 4524 | + hns3_put_ring_config(priv); |
---|
3534 | 4525 | |
---|
3535 | 4526 | return ret; |
---|
3536 | 4527 | } |
---|
.. | .. |
---|
3541 | 4532 | struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
3542 | 4533 | int ret; |
---|
3543 | 4534 | |
---|
3544 | | - hns3_force_clear_all_rx_ring(handle); |
---|
3545 | | - |
---|
3546 | | - ret = hns3_nic_uninit_vector_data(priv); |
---|
3547 | | - if (ret) { |
---|
3548 | | - netdev_err(netdev, "uninit vector error\n"); |
---|
3549 | | - return ret; |
---|
| 4535 | + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { |
---|
| 4536 | + netdev_warn(netdev, "already uninitialized\n"); |
---|
| 4537 | + return 0; |
---|
3550 | 4538 | } |
---|
3551 | 4539 | |
---|
| 4540 | + hns3_free_rx_cpu_rmap(netdev); |
---|
| 4541 | + hns3_nic_uninit_irq(priv); |
---|
| 4542 | + hns3_clear_all_ring(handle, true); |
---|
| 4543 | + hns3_reset_tx_queue(priv->ae_handle); |
---|
| 4544 | + |
---|
| 4545 | + hns3_nic_uninit_vector_data(priv); |
---|
| 4546 | + |
---|
3552 | 4547 | hns3_store_coal(priv); |
---|
| 4548 | + |
---|
| 4549 | + hns3_nic_dealloc_vector_data(priv); |
---|
3553 | 4550 | |
---|
3554 | 4551 | ret = hns3_uninit_all_ring(priv); |
---|
3555 | 4552 | if (ret) |
---|
3556 | 4553 | netdev_err(netdev, "uninit ring error\n"); |
---|
3557 | 4554 | |
---|
3558 | | - hns3_uninit_mac_addr(netdev); |
---|
| 4555 | + hns3_put_ring_config(priv); |
---|
3559 | 4556 | |
---|
3560 | 4557 | return ret; |
---|
3561 | 4558 | } |
---|
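Across the reset-notify hunks, the HNS3_NIC_STATE_INITED and HNS3_NIC_STATE_RESETTING bits make the callbacks safe to re-enter: a repeated DOWN during a reset returns early, and an UNINIT after a failed INIT is a no-op instead of a double free. A compressed sketch of the guard, with illustrative names:

```c
#include <linux/bitops.h>

#define DEMO_STATE_INITED	0
#define DEMO_STATE_RESETTING	1

struct demo_priv { unsigned long state; };

void demo_teardown(struct demo_priv *priv);

static int demo_notify_down(struct demo_priv *priv)
{
	/* Second DOWN while already resetting: nothing to do. */
	if (test_and_set_bit(DEMO_STATE_RESETTING, &priv->state))
		return 0;
	/* ... stop the data path ... */
	return 0;
}

static void demo_notify_uninit(struct demo_priv *priv)
{
	/* Only tear down what a successful init actually built. */
	if (!test_and_clear_bit(DEMO_STATE_INITED, &priv->state))
		return;
	demo_teardown(priv);
}
```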
.. | .. |
---|
3585 | 4582 | return ret; |
---|
3586 | 4583 | } |
---|
3587 | 4584 | |
---|
3588 | | -static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) |
---|
| 4585 | +static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num, |
---|
| 4586 | + bool rxfh_configured) |
---|
3589 | 4587 | { |
---|
3590 | | - struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
3591 | | - struct hnae3_handle *h = hns3_get_handle(netdev); |
---|
3592 | 4588 | int ret; |
---|
3593 | 4589 | |
---|
3594 | | - ret = h->ae_algo->ops->set_channels(h, new_tqp_num); |
---|
| 4590 | + ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, |
---|
| 4591 | + rxfh_configured); |
---|
| 4592 | + if (ret) { |
---|
| 4593 | + dev_err(&handle->pdev->dev, |
---|
| 4594 | + "Change tqp num(%u) fail.\n", new_tqp_num); |
---|
| 4595 | + return ret; |
---|
| 4596 | + } |
---|
| 4597 | + |
---|
| 4598 | + ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT); |
---|
3595 | 4599 | if (ret) |
---|
3596 | 4600 | return ret; |
---|
3597 | 4601 | |
---|
3598 | | - ret = hns3_get_ring_config(priv); |
---|
| 4602 | + ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT); |
---|
3599 | 4603 | if (ret) |
---|
3600 | | - return ret; |
---|
| 4604 | + hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT); |
---|
3601 | 4605 | |
---|
3602 | | - ret = hns3_nic_alloc_vector_data(priv); |
---|
3603 | | - if (ret) |
---|
3604 | | - goto err_alloc_vector; |
---|
3605 | | - |
---|
3606 | | - hns3_restore_coal(priv); |
---|
3607 | | - |
---|
3608 | | - ret = hns3_nic_init_vector_data(priv); |
---|
3609 | | - if (ret) |
---|
3610 | | - goto err_uninit_vector; |
---|
3611 | | - |
---|
3612 | | - ret = hns3_init_all_ring(priv); |
---|
3613 | | - if (ret) |
---|
3614 | | - goto err_put_ring; |
---|
3615 | | - |
---|
3616 | | - return 0; |
---|
3617 | | - |
---|
3618 | | -err_put_ring: |
---|
3619 | | - hns3_put_ring_config(priv); |
---|
3620 | | -err_uninit_vector: |
---|
3621 | | - hns3_nic_uninit_vector_data(priv); |
---|
3622 | | -err_alloc_vector: |
---|
3623 | | - hns3_nic_dealloc_vector_data(priv); |
---|
3624 | 4606 | return ret; |
---|
3625 | | -} |
---|
3626 | | - |
---|
3627 | | -static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num) |
---|
3628 | | -{ |
---|
3629 | | - return (new_tqp_num / num_tc) * num_tc; |
---|
3630 | 4607 | } |
---|
3631 | 4608 | |
---|
3632 | 4609 | int hns3_set_channels(struct net_device *netdev, |
---|
3633 | 4610 | struct ethtool_channels *ch) |
---|
3634 | 4611 | { |
---|
3635 | | - struct hns3_nic_priv *priv = netdev_priv(netdev); |
---|
3636 | 4612 | struct hnae3_handle *h = hns3_get_handle(netdev); |
---|
3637 | 4613 | struct hnae3_knic_private_info *kinfo = &h->kinfo; |
---|
3638 | | - bool if_running = netif_running(netdev); |
---|
| 4614 | + bool rxfh_configured = netif_is_rxfh_configured(netdev); |
---|
3639 | 4615 | u32 new_tqp_num = ch->combined_count; |
---|
3640 | 4616 | u16 org_tqp_num; |
---|
3641 | 4617 | int ret; |
---|
| 4618 | + |
---|
| 4619 | + if (hns3_nic_resetting(netdev)) |
---|
| 4620 | + return -EBUSY; |
---|
3642 | 4621 | |
---|
3643 | 4622 | if (ch->rx_count || ch->tx_count) |
---|
3644 | 4623 | return -EINVAL; |
---|
3645 | 4624 | |
---|
3646 | 4625 | if (new_tqp_num > hns3_get_max_available_channels(h) || |
---|
3647 | | - new_tqp_num < kinfo->num_tc) { |
---|
| 4626 | + new_tqp_num < 1) { |
---|
3648 | 4627 | dev_err(&netdev->dev, |
---|
3649 | | - "Change tqps fail, the tqp range is from %d to %d", |
---|
3650 | | - kinfo->num_tc, |
---|
| 4628 | + "Change tqps fail, the tqp range is from 1 to %u", |
---|
3651 | 4629 | hns3_get_max_available_channels(h)); |
---|
3652 | 4630 | return -EINVAL; |
---|
3653 | 4631 | } |
---|
3654 | 4632 | |
---|
3655 | | - new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num); |
---|
3656 | | - if (kinfo->num_tqps == new_tqp_num) |
---|
| 4633 | + if (kinfo->rss_size == new_tqp_num) |
---|
3657 | 4634 | return 0; |
---|
3658 | 4635 | |
---|
3659 | | - if (if_running) |
---|
3660 | | - hns3_nic_net_stop(netdev); |
---|
| 4636 | + netif_dbg(h, drv, netdev, |
---|
| 4637 | + "set channels: tqp_num=%u, rxfh=%d\n", |
---|
| 4638 | + new_tqp_num, rxfh_configured); |
---|
3661 | 4639 | |
---|
3662 | | - ret = hns3_nic_uninit_vector_data(priv); |
---|
3663 | | - if (ret) { |
---|
3664 | | - dev_err(&netdev->dev, |
---|
3665 | | - "Unbind vector with tqp fail, nothing is changed"); |
---|
3666 | | - goto open_netdev; |
---|
3667 | | - } |
---|
| 4640 | + ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); |
---|
| 4641 | + if (ret) |
---|
| 4642 | + return ret; |
---|
3668 | 4643 | |
---|
3669 | | - hns3_store_coal(priv); |
---|
3670 | | - |
---|
3671 | | - hns3_nic_dealloc_vector_data(priv); |
---|
3672 | | - |
---|
3673 | | - hns3_uninit_all_ring(priv); |
---|
3674 | | - hns3_put_ring_config(priv); |
---|
| 4644 | + ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); |
---|
| 4645 | + if (ret) |
---|
| 4646 | + return ret; |
---|
3675 | 4647 | |
---|
3676 | 4648 | org_tqp_num = h->kinfo.num_tqps; |
---|
3677 | | - ret = hns3_modify_tqp_num(netdev, new_tqp_num); |
---|
| 4649 | + ret = hns3_change_channels(h, new_tqp_num, rxfh_configured); |
---|
3678 | 4650 | if (ret) { |
---|
3679 | | - ret = hns3_modify_tqp_num(netdev, org_tqp_num); |
---|
3680 | | - if (ret) { |
---|
3681 | | - /* If revert to old tqp failed, fatal error occurred */ |
---|
3682 | | - dev_err(&netdev->dev, |
---|
3683 | | - "Revert to old tqp num fail, ret=%d", ret); |
---|
3684 | | - return ret; |
---|
| 4651 | + int ret1; |
---|
| 4652 | + |
---|
| 4653 | + netdev_warn(netdev, |
---|
| 4654 | + "Change channels fail, revert to old value\n"); |
---|
| 4655 | + ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured); |
---|
| 4656 | + if (ret1) { |
---|
| 4657 | + netdev_err(netdev, |
---|
| 4658 | + "revert to old channel fail\n"); |
---|
| 4659 | + return ret1; |
---|
3685 | 4660 | } |
---|
3686 | | - dev_info(&netdev->dev, |
---|
3687 | | - "Change tqp num fail, Revert to old tqp num"); |
---|
| 4661 | + |
---|
| 4662 | + return ret; |
---|
3688 | 4663 | } |
---|
3689 | 4664 | |
---|
3690 | | -open_netdev: |
---|
3691 | | - if (if_running) |
---|
3692 | | - hns3_nic_net_open(netdev); |
---|
| 4665 | + return 0; |
---|
| 4666 | +} |
---|
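hns3_set_channels() now routes the change through the reset-notify path and, if applying the new queue count fails, tries to restore the previous count so the device is never left half-configured. The essential revert-on-failure shape, where the demo_* names are assumptions:

```c
#include <linux/printk.h>
#include <linux/types.h>

struct demo_hw;
int demo_change_channels(struct demo_hw *hw, u32 num);

static int demo_set_channels(struct demo_hw *hw, u32 new_num, u32 old_num)
{
	int ret = demo_change_channels(hw, new_num);
	int ret1;

	if (!ret)
		return 0;

	pr_warn("change to %u channels failed, reverting to %u\n",
		new_num, old_num);

	ret1 = demo_change_channels(hw, old_num);
	if (ret1) {
		pr_err("revert to %u channels failed\n", old_num);
		return ret1;	/* device left in an unknown state */
	}

	return ret;	/* revert succeeded, report the original error */
}
```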
3693 | 4667 | |
---|
3694 | | - return ret; |
---|
| 4668 | +static const struct hns3_hw_error_info hns3_hw_err[] = { |
---|
| 4669 | + { .type = HNAE3_PPU_POISON_ERROR, |
---|
| 4670 | + .msg = "PPU poison" }, |
---|
| 4671 | + { .type = HNAE3_CMDQ_ECC_ERROR, |
---|
| 4672 | + .msg = "IMP CMDQ error" }, |
---|
| 4673 | + { .type = HNAE3_IMP_RD_POISON_ERROR, |
---|
| 4674 | + .msg = "IMP RD poison" }, |
---|
| 4675 | + { .type = HNAE3_ROCEE_AXI_RESP_ERROR, |
---|
| 4676 | + .msg = "ROCEE AXI RESP error" }, |
---|
| 4677 | +}; |
---|
| 4678 | + |
---|
| 4679 | +static void hns3_process_hw_error(struct hnae3_handle *handle, |
---|
| 4680 | + enum hnae3_hw_error_type type) |
---|
| 4681 | +{ |
---|
| 4682 | + int i; |
---|
| 4683 | + |
---|
| 4684 | + for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) { |
---|
| 4685 | + if (hns3_hw_err[i].type == type) { |
---|
| 4686 | + dev_err(&handle->pdev->dev, "Detected %s!\n", |
---|
| 4687 | + hns3_hw_err[i].msg); |
---|
| 4688 | + break; |
---|
| 4689 | + } |
---|
| 4690 | + } |
---|
3695 | 4691 | } |
---|
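hns3_process_hw_error() is table-driven: the hns3_hw_err[] array maps error types to messages, so adding a new hardware error is a one-line table entry rather than another switch arm. The same idiom in miniature, with made-up entries:

```c
#include <linux/kernel.h>

struct demo_err_info {
	int type;
	const char *msg;
};

static const struct demo_err_info demo_errs[] = {
	{ .type = 1, .msg = "ECC error" },
	{ .type = 2, .msg = "bus poison" },
};

static const char *demo_err_msg(int type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_errs); i++)
		if (demo_errs[i].type == type)
			return demo_errs[i].msg;

	return "unknown error";
}
```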
3696 | 4692 | |
---|
3697 | 4693 | static const struct hnae3_client_ops client_ops = { |
---|
.. | .. |
---|
3700 | 4696 | .link_status_change = hns3_link_status_change, |
---|
3701 | 4697 | .setup_tc = hns3_client_setup_tc, |
---|
3702 | 4698 | .reset_notify = hns3_reset_notify, |
---|
| 4699 | + .process_hw_error = hns3_process_hw_error, |
---|
3703 | 4700 | }; |
---|
3704 | 4701 | |
---|
3705 | 4702 | /* hns3_init_module - Driver registration routine |
---|
.. | .. |
---|
3714 | 4711 | pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); |
---|
3715 | 4712 | |
---|
3716 | 4713 | client.type = HNAE3_CLIENT_KNIC; |
---|
3717 | | - snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s", |
---|
| 4714 | + snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s", |
---|
3718 | 4715 | hns3_driver_name); |
---|
3719 | 4716 | |
---|
3720 | 4717 | client.ops = &client_ops; |
---|
3721 | 4718 | |
---|
3722 | 4719 | INIT_LIST_HEAD(&client.node); |
---|
3723 | 4720 | |
---|
| 4721 | + hns3_dbg_register_debugfs(hns3_driver_name); |
---|
| 4722 | + |
---|
3724 | 4723 | ret = hnae3_register_client(&client); |
---|
3725 | 4724 | if (ret) |
---|
3726 | | - return ret; |
---|
| 4725 | + goto err_reg_client; |
---|
3727 | 4726 | |
---|
3728 | 4727 | ret = pci_register_driver(&hns3_driver); |
---|
3729 | 4728 | if (ret) |
---|
3730 | | - hnae3_unregister_client(&client); |
---|
| 4729 | + goto err_reg_driver; |
---|
3731 | 4730 | |
---|
| 4731 | + return ret; |
---|
| 4732 | + |
---|
| 4733 | +err_reg_driver: |
---|
| 4734 | + hnae3_unregister_client(&client); |
---|
| 4735 | +err_reg_client: |
---|
| 4736 | + hns3_dbg_unregister_debugfs(); |
---|
3732 | 4737 | return ret; |
---|
3733 | 4738 | } |
---|
3734 | 4739 | module_init(hns3_init_module); |
---|
.. | .. |
---|
3741 | 4746 | { |
---|
3742 | 4747 | pci_unregister_driver(&hns3_driver); |
---|
3743 | 4748 | hnae3_unregister_client(&client); |
---|
| 4749 | + hns3_dbg_unregister_debugfs(); |
---|
3744 | 4750 | } |
---|
3745 | 4751 | module_exit(hns3_exit_module); |
---|
3746 | 4752 | |
---|
.. | .. |
---|
3748 | 4754 | MODULE_AUTHOR("Huawei Tech. Co., Ltd."); |
---|
3749 | 4755 | MODULE_LICENSE("GPL"); |
---|
3750 | 4756 | MODULE_ALIAS("pci:hns-nic"); |
---|
3751 | | -MODULE_VERSION(HNS3_MOD_VERSION); |
---|