2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Huawei HiNIC PCI Express Linux driver
  * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
  */
 
 #include <linux/kernel.h>
@@ -26,6 +17,13 @@
 #include <linux/skbuff.h>
 #include <linux/smp.h>
 #include <asm/byteorder.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
 
 #include "hinic_common.h"
 #include "hinic_hw_if.h"
@@ -45,9 +43,31 @@
 #define CI_UPDATE_NO_PENDING	0
 #define CI_UPDATE_NO_COALESC	0
 
-#define HW_CONS_IDX(sq)	be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
+#define HW_CONS_IDX(sq)	be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
 
-#define MIN_SKB_LEN	64
+#define MIN_SKB_LEN	32
+
+#define MAX_PAYLOAD_OFFSET	221
+#define TRANSPORT_OFFSET(l4_hdr, skb)	((u32)((l4_hdr) - (skb)->data))
+
+union hinic_l3 {
+	struct iphdr *v4;
+	struct ipv6hdr *v6;
+	unsigned char *hdr;
+};
+
+union hinic_l4 {
+	struct tcphdr *tcp;
+	struct udphdr *udp;
+	unsigned char *hdr;
+};
+
+enum hinic_offload_type {
+	TX_OFFLOAD_TSO = BIT(0),
+	TX_OFFLOAD_CSUM = BIT(1),
+	TX_OFFLOAD_VLAN = BIT(2),
+	TX_OFFLOAD_INVALID = BIT(3),
+};
 
 /**
  * hinic_txq_clean_stats - Clean the statistics of specific queue
@@ -63,6 +83,7 @@
 	txq_stats->tx_busy = 0;
 	txq_stats->tx_wake = 0;
 	txq_stats->tx_dropped = 0;
+	txq_stats->big_frags_pkts = 0;
 	u64_stats_update_end(&txq_stats->syncp);
 }
 
@@ -76,16 +97,15 @@
 	struct hinic_txq_stats *txq_stats = &txq->txq_stats;
 	unsigned int start;
 
-	u64_stats_update_begin(&stats->syncp);
 	do {
-		start = u64_stats_fetch_begin(&txq_stats->syncp);
+		start = u64_stats_fetch_begin_irq(&txq_stats->syncp);
 		stats->pkts = txq_stats->pkts;
 		stats->bytes = txq_stats->bytes;
 		stats->tx_busy = txq_stats->tx_busy;
 		stats->tx_wake = txq_stats->tx_wake;
 		stats->tx_dropped = txq_stats->tx_dropped;
-	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
-	u64_stats_update_end(&stats->syncp);
+		stats->big_frags_pkts = txq_stats->big_frags_pkts;
+	} while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start));
 }
 
 /**
@@ -114,7 +134,7 @@
 	struct hinic_hwdev *hwdev = nic_dev->hwdev;
 	struct hinic_hwif *hwif = hwdev->hwif;
 	struct pci_dev *pdev = hwif->pdev;
-	struct skb_frag_struct *frag;
+	skb_frag_t *frag;
 	dma_addr_t dma_addr;
 	int i, j;
 
@@ -175,18 +195,370 @@
 					 DMA_TO_DEVICE);
 }
 
-netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
+				 union hinic_l4 *l4,
+				 enum hinic_offload_type offload_type,
+				 enum hinic_l3_offload_type *l3_type,
+				 u8 *l4_proto)
+{
+	u8 *exthdr;
+
+	if (ip->v4->version == 4) {
+		*l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
+			   IPV4_PKT_NO_CHKSUM_OFFLOAD :
+			   IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+		*l4_proto = ip->v4->protocol;
+	} else if (ip->v4->version == 6) {
+		*l3_type = IPV6_PKT;
+		exthdr = ip->hdr + sizeof(*ip->v6);
+		*l4_proto = ip->v6->nexthdr;
+		if (exthdr != l4->hdr) {
+			int start = exthdr - skb->data;
+			__be16 frag_off;
+
+			ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
+		}
+	} else {
+		*l3_type = L3TYPE_UNKNOWN;
+		*l4_proto = 0;
+	}
+}
+
+static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
+			      enum hinic_offload_type offload_type, u8 l4_proto,
+			      enum hinic_l4_offload_type *l4_offload,
+			      u32 *l4_len, u32 *offset)
+{
+	*l4_offload = OFFLOAD_DISABLE;
+	*offset = 0;
+	*l4_len = 0;
+
+	switch (l4_proto) {
+	case IPPROTO_TCP:
+		*l4_offload = TCP_OFFLOAD_ENABLE;
+		/* doff in unit of 4B */
+		*l4_len = l4->tcp->doff * 4;
+		*offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
+		break;
+
+	case IPPROTO_UDP:
+		*l4_offload = UDP_OFFLOAD_ENABLE;
+		*l4_len = sizeof(struct udphdr);
+		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
+		break;
+
+	case IPPROTO_SCTP:
+		/* only csum offload support sctp */
+		if (offload_type != TX_OFFLOAD_CSUM)
+			break;
+
+		*l4_offload = SCTP_OFFLOAD_ENABLE;
+		*l4_len = sizeof(struct sctphdr);
+		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
+{
+	return (ip->v4->version == 4) ?
+		csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
+		csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
+}
+
+static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
+		       struct sk_buff *skb)
+{
+	u32 offset, l4_len, ip_identify, network_hdr_len;
+	enum hinic_l3_offload_type l3_offload;
+	enum hinic_l4_offload_type l4_offload;
+	union hinic_l3 ip;
+	union hinic_l4 l4;
+	u8 l4_proto;
+
+	if (!skb_is_gso(skb))
+		return 0;
+
+	if (skb_cow_head(skb, 0) < 0)
+		return -EPROTONOSUPPORT;
+
+	if (skb->encapsulation) {
+		u32 gso_type = skb_shinfo(skb)->gso_type;
+		u32 tunnel_type = 0;
+		u32 l4_tunnel_len;
+
+		ip.hdr = skb_network_header(skb);
+		l4.hdr = skb_transport_header(skb);
+		network_hdr_len = skb_inner_network_header_len(skb);
+
+		if (ip.v4->version == 4) {
+			ip.v4->tot_len = 0;
+			l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+		} else if (ip.v4->version == 6) {
+			l3_offload = IPV6_PKT;
+		} else {
+			l3_offload = 0;
+		}
+
+		hinic_task_set_outter_l3(task, l3_offload,
+					 skb_network_header_len(skb));
+
+		if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+			l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+			tunnel_type = TUNNEL_UDP_CSUM;
+		} else if (gso_type & SKB_GSO_UDP_TUNNEL) {
+			tunnel_type = TUNNEL_UDP_NO_CSUM;
+		}
+
+		l4_tunnel_len = skb_inner_network_offset(skb) -
+				skb_transport_offset(skb);
+		hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
+
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+	} else {
+		ip.hdr = skb_network_header(skb);
+		l4.hdr = skb_transport_header(skb);
+		network_hdr_len = skb_network_header_len(skb);
+	}
+
+	/* initialize inner IP header fields */
+	if (ip.v4->version == 4)
+		ip.v4->tot_len = 0;
+	else
+		ip.v6->payload_len = 0;
+
+	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
+			     &l4_proto);
+
+	hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);
+
+	ip_identify = 0;
+	if (l4_proto == IPPROTO_TCP)
+		l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
+
+	get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
+			  &l4_len, &offset);
+
+	hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
+			       ip_identify, skb_shinfo(skb)->gso_size);
+
+	return 1;
+}
+
+static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
+			struct sk_buff *skb)
+{
+	enum hinic_l4_offload_type l4_offload;
+	u32 offset, l4_len, network_hdr_len;
+	enum hinic_l3_offload_type l3_type;
+	u32 tunnel_type = NOT_TUNNEL;
+	union hinic_l3 ip;
+	union hinic_l4 l4;
+	u8 l4_proto;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (skb->encapsulation) {
+		u32 l4_tunnel_len;
+
+		tunnel_type = TUNNEL_UDP_NO_CSUM;
+		ip.hdr = skb_network_header(skb);
+
+		if (ip.v4->version == 4) {
+			l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
+			l4_proto = ip.v4->protocol;
+		} else if (ip.v4->version == 6) {
+			unsigned char *exthdr;
+			__be16 frag_off;
+			l3_type = IPV6_PKT;
+			tunnel_type = TUNNEL_UDP_CSUM;
+			exthdr = ip.hdr + sizeof(*ip.v6);
+			l4_proto = ip.v6->nexthdr;
+			l4.hdr = skb_transport_header(skb);
+			if (l4.hdr != exthdr)
+				ipv6_skip_exthdr(skb, exthdr - skb->data,
+						 &l4_proto, &frag_off);
+		} else {
+			l3_type = L3TYPE_UNKNOWN;
+			l4_proto = IPPROTO_RAW;
+		}
+
+		hinic_task_set_outter_l3(task, l3_type,
+					 skb_network_header_len(skb));
+
+		switch (l4_proto) {
+		case IPPROTO_UDP:
+			l4_tunnel_len = skb_inner_network_offset(skb) -
+					skb_transport_offset(skb);
+			ip.hdr = skb_inner_network_header(skb);
+			l4.hdr = skb_inner_transport_header(skb);
+			network_hdr_len = skb_inner_network_header_len(skb);
+			break;
+		case IPPROTO_IPIP:
+		case IPPROTO_IPV6:
+			tunnel_type = NOT_TUNNEL;
+			l4_tunnel_len = 0;
+
+			ip.hdr = skb_inner_network_header(skb);
+			l4.hdr = skb_transport_header(skb);
+			network_hdr_len = skb_network_header_len(skb);
+			break;
+		default:
+			/* Unsupported tunnel packet, disable csum offload */
+			skb_checksum_help(skb);
+			return 0;
+		}
+
+		hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
+	} else {
+		ip.hdr = skb_network_header(skb);
+		l4.hdr = skb_transport_header(skb);
+		network_hdr_len = skb_network_header_len(skb);
+	}
+
+	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
+			     &l4_proto);
+
+	hinic_task_set_inner_l3(task, l3_type, network_hdr_len);
+
+	get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
+			  &l4_len, &offset);
+
+	hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);
+
+	return 1;
+}
+
+static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info,
+			 u16 vlan_tag, u16 vlan_pri)
+{
+	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
+			   HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);
+
+	*queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI);
+}
+
+static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
+			    u32 *queue_info)
+{
+	enum hinic_offload_type offload = 0;
+	u16 vlan_tag;
+	int enabled;
+
+	enabled = offload_tso(task, queue_info, skb);
+	if (enabled > 0) {
+		offload |= TX_OFFLOAD_TSO;
+	} else if (enabled == 0) {
+		enabled = offload_csum(task, queue_info, skb);
+		if (enabled)
+			offload |= TX_OFFLOAD_CSUM;
+	} else {
+		return -EPROTONOSUPPORT;
+	}
+
+	if (unlikely(skb_vlan_tag_present(skb))) {
+		vlan_tag = skb_vlan_tag_get(skb);
+		offload_vlan(task, queue_info, vlan_tag,
+			     vlan_tag >> VLAN_PRIO_SHIFT);
+		offload |= TX_OFFLOAD_VLAN;
+	}
+
+	if (offload)
+		hinic_task_set_l2hdr(task, skb_network_offset(skb));
+
+	/* payload offset should not more than 221 */
+	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
+	    MAX_PAYLOAD_OFFSET) {
+		return -EPROTONOSUPPORT;
+	}
+
+	/* mss should not less than 80 */
+	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
+		*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+		*queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
+	}
+
+	return 0;
+}
+
+netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct hinic_dev *nic_dev = netdev_priv(netdev);
+	u16 prod_idx, q_id = skb->queue_mapping;
 	struct netdev_queue *netdev_txq;
 	int nr_sges, err = NETDEV_TX_OK;
 	struct hinic_sq_wqe *sq_wqe;
 	unsigned int wqe_size;
 	struct hinic_txq *txq;
 	struct hinic_qp *qp;
-	u16 prod_idx;
 
-	txq = &nic_dev->txqs[skb->queue_mapping];
+	txq = &nic_dev->txqs[q_id];
+	qp = container_of(txq->sq, struct hinic_qp, sq);
+	nr_sges = skb_shinfo(skb)->nr_frags + 1;
+
+	err = tx_map_skb(nic_dev, skb, txq->sges);
+	if (err)
+		goto skb_error;
+
+	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);
+
+	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
+	if (!sq_wqe) {
+		netif_stop_subqueue(netdev, qp->q_id);
+
+		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
+		if (sq_wqe) {
+			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
+			goto process_sq_wqe;
+		}
+
+		tx_unmap_skb(nic_dev, skb, txq->sges);
+
+		u64_stats_update_begin(&txq->txq_stats.syncp);
+		txq->txq_stats.tx_busy++;
+		u64_stats_update_end(&txq->txq_stats.syncp);
+		err = NETDEV_TX_BUSY;
+		wqe_size = 0;
+		goto flush_skbs;
+	}
+
+process_sq_wqe:
+	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
+
+flush_skbs:
+	netdev_txq = netdev_get_tx_queue(netdev, q_id);
+	if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
+		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
+
+	return err;
+
+skb_error:
+	dev_kfree_skb_any(skb);
+	u64_stats_update_begin(&txq->txq_stats.syncp);
+	txq->txq_stats.tx_dropped++;
+	u64_stats_update_end(&txq->txq_stats.syncp);
+
+	return NETDEV_TX_OK;
+}
+
+netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct hinic_dev *nic_dev = netdev_priv(netdev);
+	u16 prod_idx, q_id = skb->queue_mapping;
+	struct netdev_queue *netdev_txq;
+	int nr_sges, err = NETDEV_TX_OK;
+	struct hinic_sq_wqe *sq_wqe;
+	unsigned int wqe_size;
+	struct hinic_txq *txq;
+	struct hinic_qp *qp;
+
+	txq = &nic_dev->txqs[q_id];
 	qp = container_of(txq->sq, struct hinic_qp, sq);
 
 	if (skb->len < MIN_SKB_LEN) {
@@ -199,6 +571,12 @@
 	}
 
 	nr_sges = skb_shinfo(skb)->nr_frags + 1;
+	if (nr_sges > 17) {
+		u64_stats_update_begin(&txq->txq_stats.syncp);
+		txq->txq_stats.big_frags_pkts++;
+		u64_stats_update_end(&txq->txq_stats.syncp);
+	}
+
 	if (nr_sges > txq->max_sges) {
 		netdev_err(netdev, "Too many Tx sges\n");
 		goto skb_error;
@@ -236,14 +614,22 @@
 process_sq_wqe:
 	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
 
+	err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
+	if (err)
+		goto offload_error;
+
 	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
 
 flush_skbs:
-	netdev_txq = netdev_get_tx_queue(netdev, skb->queue_mapping);
-	if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
+	netdev_txq = netdev_get_tx_queue(netdev, q_id);
+	if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
 		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
 
 	return err;
+
+offload_error:
+	hinic_sq_return_wqe(txq->sq, wqe_size);
+	tx_unmap_skb(nic_dev, skb, txq->sges);
 
 skb_error:
 	dev_kfree_skb_any(skb);
@@ -252,7 +638,8 @@
 	u64_stats_update_begin(&txq->txq_stats.syncp);
 	txq->txq_stats.tx_dropped++;
 	u64_stats_update_end(&txq->txq_stats.syncp);
-	return err;
+
+	return NETDEV_TX_OK;
 }
 
 /**
@@ -323,6 +710,8 @@
 	do {
 		hw_ci = HW_CONS_IDX(sq) & wq->mask;
 
+		dma_rmb();
+
 		/* Reading a WQEBB to get real WQE size and consumer index. */
 		sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
 		if ((!sq_wqe) ||
@@ -355,8 +744,8 @@
 		netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);
 
 		__netif_tx_lock(netdev_txq, smp_processor_id());
-
-		netif_wake_subqueue(nic_dev->netdev, qp->q_id);
+		if (!netif_testing(nic_dev->netdev))
+			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
 
 		__netif_tx_unlock(netdev_txq);
 
@@ -372,23 +761,15 @@
 
 	if (pkts < budget) {
 		napi_complete(napi);
-		enable_irq(sq->irq);
+		if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+			hinic_hwdev_set_msix_state(nic_dev->hwdev,
+						   sq->msix_entry,
+						   HINIC_MSIX_ENABLE);
+
 		return pkts;
 	}
 
 	return budget;
-}
-
-static void tx_napi_add(struct hinic_txq *txq, int weight)
-{
-	netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
-	napi_enable(&txq->napi);
-}
-
-static void tx_napi_del(struct hinic_txq *txq)
-{
-	napi_disable(&txq->napi);
-	netif_napi_del(&txq->napi);
 }
 
 static irqreturn_t tx_irq(int irq, void *data)
@@ -398,8 +779,11 @@
 
 	nic_dev = netdev_priv(txq->netdev);
 
-	/* Disable the interrupt until napi will be completed */
-	disable_irq_nosync(txq->sq->irq);
+	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+		/* Disable the interrupt until napi will be completed */
+		hinic_hwdev_set_msix_state(nic_dev->hwdev,
+					   txq->sq->msix_entry,
+					   HINIC_MSIX_DISABLE);
 
 	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);
 
@@ -410,23 +794,42 @@
 static int tx_request_irq(struct hinic_txq *txq)
 {
 	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
+	struct hinic_msix_config interrupt_info = {0};
+	struct hinic_intr_coal_info *intr_coal = NULL;
 	struct hinic_hwdev *hwdev = nic_dev->hwdev;
 	struct hinic_hwif *hwif = hwdev->hwif;
 	struct pci_dev *pdev = hwif->pdev;
 	struct hinic_sq *sq = txq->sq;
+	struct hinic_qp *qp;
 	int err;
 
-	tx_napi_add(txq, nic_dev->tx_weight);
+	qp = container_of(sq, struct hinic_qp, sq);
+
+	netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight);
 
 	hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
 			     TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
 			     TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
 			     TX_IRQ_NO_RESEND_TIMER);
 
+	intr_coal = &nic_dev->tx_intr_coalesce[qp->q_id];
+	interrupt_info.msix_index = sq->msix_entry;
+	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
+	interrupt_info.pending_cnt = intr_coal->pending_limt;
+	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
+
+	err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
+	if (err) {
+		netif_err(nic_dev, drv, txq->netdev,
+			  "Failed to set TX interrupt coalescing attribute\n");
+		netif_napi_del(&txq->napi);
+		return err;
+	}
+
 	err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
 	if (err) {
 		dev_err(&pdev->dev, "Failed to request Tx irq\n");
-		tx_napi_del(txq);
+		netif_napi_del(&txq->napi);
 		return err;
 	}
 
@@ -438,7 +841,7 @@
 	struct hinic_sq *sq = txq->sq;
 
 	free_irq(sq->irq, txq);
-	tx_napi_del(txq);
+	netif_napi_del(&txq->napi);
 }
 
 /**
@@ -456,7 +859,6 @@
 	struct hinic_dev *nic_dev = netdev_priv(netdev);
 	struct hinic_hwdev *hwdev = nic_dev->hwdev;
 	int err, irqname_len;
-	size_t sges_size;
 
 	txq->netdev = netdev;
 	txq->sq = sq;
@@ -465,26 +867,26 @@
 
 	txq->max_sges = HINIC_MAX_SQ_BUFDESCS;
 
-	sges_size = txq->max_sges * sizeof(*txq->sges);
-	txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+	txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+				 sizeof(*txq->sges), GFP_KERNEL);
 	if (!txq->sges)
 		return -ENOMEM;
 
-	sges_size = txq->max_sges * sizeof(*txq->free_sges);
-	txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+	txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+				      sizeof(*txq->free_sges), GFP_KERNEL);
 	if (!txq->free_sges) {
 		err = -ENOMEM;
 		goto err_alloc_free_sges;
 	}
 
-	irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
+	irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1;
 	txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
 	if (!txq->irq_name) {
 		err = -ENOMEM;
 		goto err_alloc_irqname;
 	}
 
-	sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);
+	sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);
 
 	err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
 					 CI_UPDATE_NO_COALESC);