hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/drivers/net/ethernet/sfc/tx.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /****************************************************************************
  * Driver for Solarflare network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
  * Copyright 2005-2013 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
  */
 
 #include <linux/pci.h>
@@ -23,6 +20,7 @@
 #include "io.h"
 #include "nic.h"
 #include "tx.h"
+#include "tx_common.h"
 #include "workarounds.h"
 #include "ef10_regs.h"
 
@@ -59,79 +57,14 @@
         return efx_tx_get_copy_buffer(tx_queue, buffer);
 }
 
-static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
-                               struct efx_tx_buffer *buffer,
-                               unsigned int *pkts_compl,
-                               unsigned int *bytes_compl)
-{
-        if (buffer->unmap_len) {
-                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
-                dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
-                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
-                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
-                                         DMA_TO_DEVICE);
-                else
-                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
-                                       DMA_TO_DEVICE);
-                buffer->unmap_len = 0;
-        }
-
-        if (buffer->flags & EFX_TX_BUF_SKB) {
-                struct sk_buff *skb = (struct sk_buff *)buffer->skb;
-
-                EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
-                (*pkts_compl)++;
-                (*bytes_compl) += skb->len;
-                if (tx_queue->timestamping &&
-                    (tx_queue->completed_timestamp_major ||
-                     tx_queue->completed_timestamp_minor)) {
-                        struct skb_shared_hwtstamps hwtstamp;
-
-                        hwtstamp.hwtstamp =
-                                efx_ptp_nic_to_kernel_time(tx_queue);
-                        skb_tstamp_tx(skb, &hwtstamp);
-
-                        tx_queue->completed_timestamp_major = 0;
-                        tx_queue->completed_timestamp_minor = 0;
-                }
-                dev_consume_skb_any((struct sk_buff *)buffer->skb);
-                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
-                           "TX queue %d transmission id %x complete\n",
-                           tx_queue->queue, tx_queue->read_count);
-        }
-
-        buffer->len = 0;
-        buffer->flags = 0;
-}
-
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
-{
-        /* Header and payload descriptor for each output segment, plus
-         * one for every input fragment boundary within a segment
-         */
-        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
-
-        /* Possibly one more per segment for option descriptors */
-        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
-                max_descs += EFX_TSO_MAX_SEGS;
-
-        /* Possibly more for PCIe page boundaries within input fragments */
-        if (PAGE_SIZE > EFX_PAGE_SIZE)
-                max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
-                                   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
-
-        return max_descs;
-}
-
 static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
 {
-        /* We need to consider both queues that the net core sees as one */
-        struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
+        /* We need to consider all queues that the net core sees as one */
         struct efx_nic *efx = txq1->efx;
+        struct efx_tx_queue *txq2;
         unsigned int fill_level;
 
-        fill_level = max(txq1->insert_count - txq1->old_read_count,
-                         txq2->insert_count - txq2->old_read_count);
+        fill_level = efx_channel_tx_old_fill_level(txq1->channel);
         if (likely(fill_level < efx->txq_stop_thresh))
                 return;
 
@@ -151,11 +84,10 @@
          */
         netif_tx_stop_queue(txq1->core_txq);
         smp_mb();
-        txq1->old_read_count = READ_ONCE(txq1->read_count);
-        txq2->old_read_count = READ_ONCE(txq2->read_count);
+        efx_for_each_channel_tx_queue(txq2, txq1->channel)
+                txq2->old_read_count = READ_ONCE(txq2->read_count);
 
-        fill_level = max(txq1->insert_count - txq1->old_read_count,
-                         txq2->insert_count - txq2->old_read_count);
+        fill_level = efx_channel_tx_old_fill_level(txq1->channel);
         EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
         if (likely(fill_level < efx->txq_stop_thresh)) {
                 smp_mb();
@@ -277,7 +209,7 @@
 
                 vaddr = kmap_atomic(skb_frag_page(f));
 
-                efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
+                efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
                                            skb_frag_size(f), copy_buf);
                 kunmap_atomic(vaddr);
         }
@@ -332,159 +264,43 @@
         ++tx_queue->insert_count;
         return 0;
 }
+
+/* Decide whether we can use TX PIO, ie. write packet data directly into
+ * a buffer on the device. This can reduce latency at the expense of
+ * throughput, so we only do this if both hardware and software TX rings
+ * are empty, including all queues for the channel. This also ensures that
+ * only one packet at a time can be using the PIO buffer. If the xmit_more
+ * flag is set then we don't use this - there'll be another packet along
+ * shortly and we want to hold off the doorbell.
+ */
+static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue)
+{
+        struct efx_channel *channel = tx_queue->channel;
+
+        if (!tx_queue->piobuf)
+                return false;
+
+        EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors);
+
+        efx_for_each_channel_tx_queue(tx_queue, channel)
+                if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))
+                        return false;
+
+        return true;
+}
 #endif /* EFX_USE_PIO */
 
-static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
-                                              dma_addr_t dma_addr,
-                                              size_t len)
-{
-        const struct efx_nic_type *nic_type = tx_queue->efx->type;
-        struct efx_tx_buffer *buffer;
-        unsigned int dma_len;
-
-        /* Map the fragment taking account of NIC-dependent DMA limits. */
-        do {
-                buffer = efx_tx_queue_get_insert_buffer(tx_queue);
-                dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
-
-                buffer->len = dma_len;
-                buffer->dma_addr = dma_addr;
-                buffer->flags = EFX_TX_BUF_CONT;
-                len -= dma_len;
-                dma_addr += dma_len;
-                ++tx_queue->insert_count;
-        } while (len);
-
-        return buffer;
-}
-
-/* Map all data from an SKB for DMA and create descriptors on the queue.
+/* Send any pending traffic for a channel. xmit_more is shared across all
+ * queues for a channel, so we must check all of them.
  */
-static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
-                           unsigned int segment_count)
+static void efx_tx_send_pending(struct efx_channel *channel)
 {
-        struct efx_nic *efx = tx_queue->efx;
-        struct device *dma_dev = &efx->pci_dev->dev;
-        unsigned int frag_index, nr_frags;
-        dma_addr_t dma_addr, unmap_addr;
-        unsigned short dma_flags;
-        size_t len, unmap_len;
+        struct efx_tx_queue *q;
 
-        nr_frags = skb_shinfo(skb)->nr_frags;
-        frag_index = 0;
-
-        /* Map header data. */
-        len = skb_headlen(skb);
-        dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
-        dma_flags = EFX_TX_BUF_MAP_SINGLE;
-        unmap_len = len;
-        unmap_addr = dma_addr;
-
-        if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
-                return -EIO;
-
-        if (segment_count) {
-                /* For TSO we need to put the header in to a separate
-                 * descriptor. Map this separately if necessary.
-                 */
-                size_t header_len = skb_transport_header(skb) - skb->data +
-                                (tcp_hdr(skb)->doff << 2u);
-
-                if (header_len != len) {
-                        tx_queue->tso_long_headers++;
-                        efx_tx_map_chunk(tx_queue, dma_addr, header_len);
-                        len -= header_len;
-                        dma_addr += header_len;
-                }
+        efx_for_each_channel_tx_queue(q, channel) {
+                if (q->xmit_pending)
+                        efx_nic_push_buffers(q);
         }
-
-        /* Add descriptors for each fragment. */
-        do {
-                struct efx_tx_buffer *buffer;
-                skb_frag_t *fragment;
-
-                buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
-
-                /* The final descriptor for a fragment is responsible for
-                 * unmapping the whole fragment.
-                 */
-                buffer->flags = EFX_TX_BUF_CONT | dma_flags;
-                buffer->unmap_len = unmap_len;
-                buffer->dma_offset = buffer->dma_addr - unmap_addr;
-
-                if (frag_index >= nr_frags) {
-                        /* Store SKB details with the final buffer for
-                         * the completion.
-                         */
-                        buffer->skb = skb;
-                        buffer->flags = EFX_TX_BUF_SKB | dma_flags;
-                        return 0;
-                }
-
-                /* Move on to the next fragment. */
-                fragment = &skb_shinfo(skb)->frags[frag_index++];
-                len = skb_frag_size(fragment);
-                dma_addr = skb_frag_dma_map(dma_dev, fragment,
-                                            0, len, DMA_TO_DEVICE);
-                dma_flags = 0;
-                unmap_len = len;
-                unmap_addr = dma_addr;
-
-                if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
-                        return -EIO;
-        } while (1);
-}
-
-/* Remove buffers put into a tx_queue for the current packet.
- * None of the buffers must have an skb attached.
- */
-static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
-                               unsigned int insert_count)
-{
-        struct efx_tx_buffer *buffer;
-        unsigned int bytes_compl = 0;
-        unsigned int pkts_compl = 0;
-
-        /* Work backwards until we hit the original insert pointer value */
-        while (tx_queue->insert_count != insert_count) {
-                --tx_queue->insert_count;
-                buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
-                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-        }
-}
-
-/*
- * Fallback to software TSO.
- *
- * This is used if we are unable to send a GSO packet through hardware TSO.
- * This should only ever happen due to per-queue restrictions - unsupported
- * packets should first be filtered by the feature flags.
- *
- * Returns 0 on success, error code otherwise.
- */
-static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
-                               struct sk_buff *skb)
-{
-        struct sk_buff *segments, *next;
-
-        segments = skb_gso_segment(skb, 0);
-        if (IS_ERR(segments))
-                return PTR_ERR(segments);
-
-        dev_kfree_skb_any(skb);
-        skb = segments;
-
-        while (skb) {
-                next = skb->next;
-                skb->next = NULL;
-
-                if (next)
-                        skb->xmit_more = true;
-                efx_enqueue_skb(tx_queue, skb);
-                skb = next;
-        }
-
-        return 0;
 }
 
 /*
@@ -503,10 +319,10 @@
  * Returns NETDEV_TX_OK.
  * You must hold netif_tx_lock() to call this function.
  */
-netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 {
         unsigned int old_insert_count = tx_queue->insert_count;
-        bool xmit_more = skb->xmit_more;
+        bool xmit_more = netdev_xmit_more();
         bool data_mapped = false;
         unsigned int segments;
         unsigned int skb_len;
@@ -522,8 +338,18 @@
          * size limit.
          */
         if (segments) {
-                EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
-                rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
+                switch (tx_queue->tso_version) {
+                case 1:
+                        rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
+                        break;
+                case 2:
+                        rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped);
+                        break;
+                case 0: /* No TSO on this queue, SW fallback needed */
+                default:
+                        rc = -EINVAL;
+                        break;
+                }
                 if (rc == -EINVAL) {
                         rc = efx_tx_tso_fallback(tx_queue, skb);
                         tx_queue->tso_fallbacks++;
@@ -533,8 +359,8 @@
                 if (rc)
                         goto err;
 #ifdef EFX_USE_PIO
-        } else if (skb_len <= efx_piobuf_size && !skb->xmit_more &&
-                   efx_nic_may_tx_pio(tx_queue)) {
+        } else if (skb_len <= efx_piobuf_size && !xmit_more &&
+                   efx_tx_may_pio(tx_queue)) {
                 /* Use PIO for short packets with an empty queue. */
                 if (efx_enqueue_skb_pio(tx_queue, skb))
                         goto err;
@@ -553,26 +379,13 @@
         if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
                 goto err;
 
-        /* Update BQL */
-        netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
-
         efx_tx_maybe_stop_queue(tx_queue);
 
+        tx_queue->xmit_pending = true;
+
         /* Pass off to hardware */
-        if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
-                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-
-                /* There could be packets left on the partner queue if those
-                 * SKBs had skb->xmit_more set. If we do not push those they
-                 * could be left for a long time and cause a netdev watchdog.
-                 */
-                if (txq2->xmit_more_available)
-                        efx_nic_push_buffers(txq2);
-
-                efx_nic_push_buffers(tx_queue);
-        } else {
-                tx_queue->xmit_more_available = skb->xmit_more;
-        }
+        if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
+                efx_tx_send_pending(tx_queue->channel);
 
         if (segments) {
                 tx_queue->tso_bursts++;
@@ -593,61 +406,105 @@
          * on this queue or a partner queue then we need to push here to get the
          * previous packets out.
          */
-        if (!xmit_more) {
-                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-
-                if (txq2->xmit_more_available)
-                        efx_nic_push_buffers(txq2);
-
-                efx_nic_push_buffers(tx_queue);
-        }
+        if (!xmit_more)
+                efx_tx_send_pending(tx_queue->channel);
 
         return NETDEV_TX_OK;
 }
 
-/* Remove packets from the TX queue
- *
- * This removes packets from the TX queue, up to and including the
- * specified index.
- */
-static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
-                                unsigned int index,
-                                unsigned int *pkts_compl,
-                                unsigned int *bytes_compl)
+static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
 {
-        struct efx_nic *efx = tx_queue->efx;
-        unsigned int stop_index, read_ptr;
+        int i;
 
-        stop_index = (index + 1) & tx_queue->ptr_mask;
-        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+        for (i = 0; i < n; i++)
+                xdp_return_frame_rx_napi(xdpfs[i]);
+}
 
-        while (read_ptr != stop_index) {
-                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+/* Transmit a packet from an XDP buffer
+ *
+ * Returns number of packets sent on success, error code otherwise.
+ * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
+ * (for XDP redirect).
+ */
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+                       bool flush)
+{
+        struct efx_tx_buffer *tx_buffer;
+        struct efx_tx_queue *tx_queue;
+        struct xdp_frame *xdpf;
+        dma_addr_t dma_addr;
+        unsigned int len;
+        int space;
+        int cpu;
+        int i;
 
-                if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
-                    unlikely(buffer->len == 0)) {
-                        netif_err(efx, tx_err, efx->net_dev,
-                                  "TX queue %d spurious TX completion id %x\n",
-                                  tx_queue->queue, read_ptr);
-                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
-                        return;
-                }
+        cpu = raw_smp_processor_id();
 
-                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
+        if (!efx->xdp_tx_queue_count ||
+            unlikely(cpu >= efx->xdp_tx_queue_count))
+                return -EINVAL;
 
-                ++tx_queue->read_count;
-                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+        tx_queue = efx->xdp_tx_queues[cpu];
+        if (unlikely(!tx_queue))
+                return -EINVAL;
+
+        if (unlikely(n && !xdpfs))
+                return -EINVAL;
+
+        if (!n)
+                return 0;
+
+        /* Check for available space. We should never need multiple
+         * descriptors per frame.
+         */
+        space = efx->txq_entries +
+                tx_queue->read_count - tx_queue->insert_count;
+
+        for (i = 0; i < n; i++) {
+                xdpf = xdpfs[i];
+
+                if (i >= space)
+                        break;
+
+                /* We'll want a descriptor for this tx. */
+                prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
+
+                len = xdpf->len;
+
+                /* Map for DMA. */
+                dma_addr = dma_map_single(&efx->pci_dev->dev,
+                                          xdpf->data, len,
+                                          DMA_TO_DEVICE);
+                if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
+                        break;
+
+                /* Create descriptor and set up for unmapping DMA. */
+                tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+                tx_buffer->xdpf = xdpf;
+                tx_buffer->flags = EFX_TX_BUF_XDP |
+                                   EFX_TX_BUF_MAP_SINGLE;
+                tx_buffer->dma_offset = 0;
+                tx_buffer->unmap_len = len;
+                tx_queue->tx_packets++;
         }
+
+        /* Pass mapped frames to hardware. */
+        if (flush && i > 0)
+                efx_nic_push_buffers(tx_queue);
+
+        if (i == 0)
+                return -EIO;
+
+        efx_xdp_return_frames(n - i, xdpfs + i);
+
+        return i;
 }
 
 /* Initiate a packet transmission. We use one channel per CPU
- * (sharing when we have more CPUs than channels). On Falcon, the TX
- * completion events will be directed back to the CPU that transmitted
- * the packet, which should be cache-efficient.
+ * (sharing when we have more CPUs than channels).
  *
  * Context: non-blocking.
- * Note that returning anything other than NETDEV_TX_OK will cause the
- * OS to free the skb.
+ * Should always return NETDEV_TX_OK and consume the skb.
  */
 netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                 struct net_device *net_dev)
@@ -658,21 +515,79 @@
 
         EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
 
-        /* PTP "event" packet */
-        if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
-            unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
-                return efx_ptp_tx(efx, skb);
-        }
-
         index = skb_get_queue_mapping(skb);
-        type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+        type = efx_tx_csum_type_skb(skb);
         if (index >= efx->n_tx_channels) {
                 index -= efx->n_tx_channels;
                 type |= EFX_TXQ_TYPE_HIGHPRI;
         }
-        tx_queue = efx_get_tx_queue(efx, index, type);
 
-        return efx_enqueue_skb(tx_queue, skb);
+        /* PTP "event" packet */
+        if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
+            unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+                /* There may be existing transmits on the channel that are
+                 * waiting for this packet to trigger the doorbell write.
+                 * We need to send the packets at this point.
+                 */
+                efx_tx_send_pending(efx_get_tx_channel(efx, index));
+                return efx_ptp_tx(efx, skb);
+        }
+
+        tx_queue = efx_get_tx_queue(efx, index, type);
+        if (WARN_ON_ONCE(!tx_queue)) {
+                /* We don't have a TXQ of the right type.
+                 * This should never happen, as we don't advertise offload
+                 * features unless we can support them.
+                 */
+                dev_kfree_skb_any(skb);
+                /* If we're not expecting another transmit and we had something to push
+                 * on this queue or a partner queue then we need to push here to get the
+                 * previous packets out.
+                 */
+                if (!netdev_xmit_more())
+                        efx_tx_send_pending(efx_get_tx_channel(efx, index));
+                return NETDEV_TX_OK;
+        }
+
+        return __efx_enqueue_skb(tx_queue, skb);
+}
+
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
+{
+        unsigned int pkts_compl = 0, bytes_compl = 0;
+        unsigned int read_ptr;
+        bool finished = false;
+
+        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+        while (!finished) {
+                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+                if (!efx_tx_buffer_in_use(buffer)) {
+                        struct efx_nic *efx = tx_queue->efx;
+
+                        netif_err(efx, hw, efx->net_dev,
+                                  "TX queue %d spurious single TX completion\n",
+                                  tx_queue->queue);
+                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+                        return;
+                }
+
+                /* Need to check the flag before dequeueing. */
+                if (buffer->flags & EFX_TX_BUF_SKB)
+                        finished = true;
+                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+                ++tx_queue->read_count;
+                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+        }
+
+        tx_queue->pkts_compl += pkts_compl;
+        tx_queue->bytes_compl += bytes_compl;
+
+        EFX_WARN_ON_PARANOID(pkts_compl != 1);
+
+        efx_xmit_done_check_empty(tx_queue);
 }
 
 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
@@ -682,8 +597,8 @@
         /* Must be inverse of queue lookup in efx_hard_start_xmit() */
         tx_queue->core_txq =
                 netdev_get_tx_queue(efx->net_dev,
-                                    tx_queue->queue / EFX_TXQ_TYPES +
-                                    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+                                    tx_queue->channel->channel +
+                                    ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
                                      efx->n_tx_channels : 0));
 }
 
@@ -692,12 +607,13 @@
 {
         struct efx_nic *efx = netdev_priv(net_dev);
         struct tc_mqprio_qopt *mqprio = type_data;
-        struct efx_channel *channel;
-        struct efx_tx_queue *tx_queue;
         unsigned tc, num_tc;
-        int rc;
 
         if (type != TC_SETUP_QDISC_MQPRIO)
+                return -EOPNOTSUPP;
+
+        /* Only Siena supported highpri queues */
+        if (efx_nic_rev(efx) > EFX_REV_SIENA_A0)
                 return -EOPNOTSUPP;
 
         num_tc = mqprio->num_tc;
@@ -715,208 +631,9 @@
                 net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
         }
 
-        if (num_tc > net_dev->num_tc) {
-                /* Initialise high-priority queues as necessary */
-                efx_for_each_channel(channel, efx) {
-                        efx_for_each_possible_channel_tx_queue(tx_queue,
-                                                               channel) {
-                                if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
-                                        continue;
-                                if (!tx_queue->buffer) {
-                                        rc = efx_probe_tx_queue(tx_queue);
-                                        if (rc)
-                                                return rc;
-                                }
-                                if (!tx_queue->initialised)
-                                        efx_init_tx_queue(tx_queue);
-                                efx_init_tx_queue_core_txq(tx_queue);
-                        }
-                }
-        } else {
-                /* Reduce number of classes before number of queues */
-                net_dev->num_tc = num_tc;
-        }
-
-        rc = netif_set_real_num_tx_queues(net_dev,
-                                          max_t(int, num_tc, 1) *
-                                          efx->n_tx_channels);
-        if (rc)
-                return rc;
-
-        /* Do not destroy high-priority queues when they become
-         * unused. We would have to flush them first, and it is
-         * fairly difficult to flush a subset of TX queues. Leave
-         * it to efx_fini_channels().
-         */
-
         net_dev->num_tc = num_tc;
-        return 0;
-}
 
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
-{
-        unsigned fill_level;
-        struct efx_nic *efx = tx_queue->efx;
-        struct efx_tx_queue *txq2;
-        unsigned int pkts_compl = 0, bytes_compl = 0;
-
-        EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
-
-        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
-        tx_queue->pkts_compl += pkts_compl;
-        tx_queue->bytes_compl += bytes_compl;
-
-        if (pkts_compl > 1)
-                ++tx_queue->merge_events;
-
-        /* See if we need to restart the netif queue. This memory
-         * barrier ensures that we write read_count (inside
-         * efx_dequeue_buffers()) before reading the queue status.
-         */
-        smp_mb();
-        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
-            likely(efx->port_enabled) &&
-            likely(netif_device_present(efx->net_dev))) {
-                txq2 = efx_tx_queue_partner(tx_queue);
-                fill_level = max(tx_queue->insert_count - tx_queue->read_count,
-                                 txq2->insert_count - txq2->read_count);
-                if (fill_level <= efx->txq_wake_thresh)
-                        netif_tx_wake_queue(tx_queue->core_txq);
-        }
-
-        /* Check whether the hardware queue is now empty */
-        if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
-                tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
-                if (tx_queue->read_count == tx_queue->old_write_count) {
-                        smp_mb();
-                        tx_queue->empty_read_count =
-                                tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
-                }
-        }
-}
-
-static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
-{
-        return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
-}
-
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
-{
-        struct efx_nic *efx = tx_queue->efx;
-        unsigned int entries;
-        int rc;
-
-        /* Create the smallest power-of-two aligned ring */
-        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
-        EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
-        tx_queue->ptr_mask = entries - 1;
-
-        netif_dbg(efx, probe, efx->net_dev,
-                  "creating TX queue %d size %#x mask %#x\n",
-                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
-
-        /* Allocate software ring */
-        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
-                                   GFP_KERNEL);
-        if (!tx_queue->buffer)
-                return -ENOMEM;
-
-        tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
-                                    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
-        if (!tx_queue->cb_page) {
-                rc = -ENOMEM;
-                goto fail1;
-        }
-
-        /* Allocate hardware ring */
-        rc = efx_nic_probe_tx(tx_queue);
-        if (rc)
-                goto fail2;
-
-        return 0;
-
-fail2:
-        kfree(tx_queue->cb_page);
-        tx_queue->cb_page = NULL;
-fail1:
-        kfree(tx_queue->buffer);
-        tx_queue->buffer = NULL;
-        return rc;
-}
-
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
-{
-        struct efx_nic *efx = tx_queue->efx;
-
-        netif_dbg(efx, drv, efx->net_dev,
-                  "initialising TX queue %d\n", tx_queue->queue);
-
-        tx_queue->insert_count = 0;
-        tx_queue->write_count = 0;
-        tx_queue->packet_write_count = 0;
-        tx_queue->old_write_count = 0;
-        tx_queue->read_count = 0;
-        tx_queue->old_read_count = 0;
-        tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
-        tx_queue->xmit_more_available = false;
-        tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
-                                  tx_queue->channel == efx_ptp_channel(efx));
-        tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
-        tx_queue->completed_timestamp_major = 0;
-        tx_queue->completed_timestamp_minor = 0;
-
-        /* Set up default function pointers. These may get replaced by
-         * efx_nic_init_tx() based off NIC/queue capabilities.
-         */
-        tx_queue->handle_tso = efx_enqueue_skb_tso;
-
-        /* Set up TX descriptor ring */
-        efx_nic_init_tx(tx_queue);
-
-        tx_queue->initialised = true;
-}
-
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-{
-        struct efx_tx_buffer *buffer;
-
-        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
-                  "shutting down TX queue %d\n", tx_queue->queue);
-
-        if (!tx_queue->buffer)
-                return;
-
-        /* Free any buffers left in the ring */
-        while (tx_queue->read_count != tx_queue->write_count) {
-                unsigned int pkts_compl = 0, bytes_compl = 0;
-                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
-                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-
-                ++tx_queue->read_count;
-        }
-        tx_queue->xmit_more_available = false;
-        netdev_tx_reset_queue(tx_queue->core_txq);
-}
-
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
-{
-        int i;
-
-        if (!tx_queue->buffer)
-                return;
-
-        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
-                  "destroying TX queue %d\n", tx_queue->queue);
-        efx_nic_remove_tx(tx_queue);
-
-        if (tx_queue->cb_page) {
-                for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
-                        efx_nic_free_buffer(tx_queue->efx,
-                                            &tx_queue->cb_page[i]);
-                kfree(tx_queue->cb_page);
-                tx_queue->cb_page = NULL;
-        }
-
-        kfree(tx_queue->buffer);
-        tx_queue->buffer = NULL;
+        return netif_set_real_num_tx_queues(net_dev,
+                                            max_t(int, num_tc, 1) *
+                                            efx->n_tx_channels);
 }