2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/drivers/net/ethernet/sfc/efx.c
@@ -1,11 +1,8 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /****************************************************************************
23 * Driver for Solarflare network controllers and boards
34 * Copyright 2005-2006 Fen Systems Ltd.
45 * Copyright 2005-2013 Solarflare Communications Inc.
5
- *
6
- * This program is free software; you can redistribute it and/or modify it
7
- * under the terms of the GNU General Public License version 2 as published
8
- * by the Free Software Foundation, incorporated herein by reference.
96 */
107
118 #include <linux/module.h>
@@ -26,104 +23,32 @@
2623 #include <net/gre.h>
2724 #include <net/udp_tunnel.h>
2825 #include "efx.h"
26
+#include "efx_common.h"
27
+#include "efx_channels.h"
28
+#include "ef100.h"
29
+#include "rx_common.h"
30
+#include "tx_common.h"
2931 #include "nic.h"
3032 #include "io.h"
3133 #include "selftest.h"
3234 #include "sriov.h"
3335
34
-#include "mcdi.h"
36
+#include "mcdi_port_common.h"
3537 #include "mcdi_pcol.h"
3638 #include "workarounds.h"
37
-
38
-/**************************************************************************
39
- *
40
- * Type name strings
41
- *
42
- **************************************************************************
43
- */
44
-
45
-/* Loopback mode names (see LOOPBACK_MODE()) */
46
-const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
47
-const char *const efx_loopback_mode_names[] = {
48
- [LOOPBACK_NONE] = "NONE",
49
- [LOOPBACK_DATA] = "DATAPATH",
50
- [LOOPBACK_GMAC] = "GMAC",
51
- [LOOPBACK_XGMII] = "XGMII",
52
- [LOOPBACK_XGXS] = "XGXS",
53
- [LOOPBACK_XAUI] = "XAUI",
54
- [LOOPBACK_GMII] = "GMII",
55
- [LOOPBACK_SGMII] = "SGMII",
56
- [LOOPBACK_XGBR] = "XGBR",
57
- [LOOPBACK_XFI] = "XFI",
58
- [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
59
- [LOOPBACK_GMII_FAR] = "GMII_FAR",
60
- [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
61
- [LOOPBACK_XFI_FAR] = "XFI_FAR",
62
- [LOOPBACK_GPHY] = "GPHY",
63
- [LOOPBACK_PHYXS] = "PHYXS",
64
- [LOOPBACK_PCS] = "PCS",
65
- [LOOPBACK_PMAPMD] = "PMA/PMD",
66
- [LOOPBACK_XPORT] = "XPORT",
67
- [LOOPBACK_XGMII_WS] = "XGMII_WS",
68
- [LOOPBACK_XAUI_WS] = "XAUI_WS",
69
- [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
70
- [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
71
- [LOOPBACK_GMII_WS] = "GMII_WS",
72
- [LOOPBACK_XFI_WS] = "XFI_WS",
73
- [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
74
- [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
75
-};
76
-
77
-const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
78
-const char *const efx_reset_type_names[] = {
79
- [RESET_TYPE_INVISIBLE] = "INVISIBLE",
80
- [RESET_TYPE_ALL] = "ALL",
81
- [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
82
- [RESET_TYPE_WORLD] = "WORLD",
83
- [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
84
- [RESET_TYPE_DATAPATH] = "DATAPATH",
85
- [RESET_TYPE_MC_BIST] = "MC_BIST",
86
- [RESET_TYPE_DISABLE] = "DISABLE",
87
- [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
88
- [RESET_TYPE_INT_ERROR] = "INT_ERROR",
89
- [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
90
- [RESET_TYPE_TX_SKIP] = "TX_SKIP",
91
- [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
92
- [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
93
-};
94
-
95
-/* UDP tunnel type names */
96
-static const char *const efx_udp_tunnel_type_names[] = {
97
- [TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
98
- [TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
99
-};
100
-
101
-void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
102
-{
103
- if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
104
- efx_udp_tunnel_type_names[type] != NULL)
105
- snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
106
- else
107
- snprintf(buf, buflen, "type %d", type);
108
-}
109
-
110
-/* Reset workqueue. If any NIC has a hardware failure then a reset will be
111
- * queued onto this work queue. This is not a per-nic work queue, because
112
- * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
113
- */
114
-static struct workqueue_struct *reset_workqueue;
115
-
116
-/* How often and how many times to poll for a reset while waiting for a
117
- * BIST that another function started to complete.
118
- */
119
-#define BIST_WAIT_DELAY_MS 100
120
-#define BIST_WAIT_DELAY_COUNT 100
12139
12240 /**************************************************************************
12341 *
12442 * Configurable values
12543 *
12644 *************************************************************************/
45
+
46
+module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444);
47
+MODULE_PARM_DESC(interrupt_mode,
48
+ "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
49
+
50
+module_param(rss_cpus, uint, 0444);
51
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
12752
12853 /*
12954 * Use separate channels for TX and RX events
@@ -137,21 +62,6 @@
13762 module_param(efx_separate_tx_channels, bool, 0444);
13863 MODULE_PARM_DESC(efx_separate_tx_channels,
13964 "Use separate channels for TX and RX");
140
-
141
-/* This is the weight assigned to each of the (per-channel) virtual
142
- * NAPI devices.
143
- */
144
-static int napi_weight = 64;
145
-
146
-/* This is the time (in jiffies) between invocations of the hardware
147
- * monitor.
148
- * On Falcon-based NICs, this will:
149
- * - Check the on-board hardware monitor;
150
- * - Poll the link state and reconfigure the hardware as necessary.
151
- * On Siena-based NICs for power systems with EEH support, this will give EEH a
152
- * chance to start.
153
- */
154
-static unsigned int efx_monitor_interval = 1 * HZ;
15565
15666 /* Initial interrupt moderation settings. They can be modified after
15767 * module load with ethtool.
@@ -172,37 +82,9 @@
17282 */
17383 static unsigned int tx_irq_mod_usec = 150;
17484
175
-/* This is the first interrupt mode to try out of:
176
- * 0 => MSI-X
177
- * 1 => MSI
178
- * 2 => legacy
179
- */
180
-static unsigned int interrupt_mode;
181
-
182
-/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
183
- * i.e. the number of CPUs among which we may distribute simultaneous
184
- * interrupt handling.
185
- *
186
- * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
187
- * The default (0) means to assign an interrupt to each core.
188
- */
189
-static unsigned int rss_cpus;
190
-module_param(rss_cpus, uint, 0444);
191
-MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
192
-
19385 static bool phy_flash_cfg;
19486 module_param(phy_flash_cfg, bool, 0644);
19587 MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
196
-
197
-static unsigned irq_adapt_low_thresh = 8000;
198
-module_param(irq_adapt_low_thresh, uint, 0644);
199
-MODULE_PARM_DESC(irq_adapt_low_thresh,
200
- "Threshold score for reducing IRQ moderation");
201
-
202
-static unsigned irq_adapt_high_thresh = 16000;
203
-module_param(irq_adapt_high_thresh, uint, 0644);
204
-MODULE_PARM_DESC(irq_adapt_high_thresh,
205
- "Threshold score for increasing IRQ moderation");
20688
20789 static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
20890 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
@@ -217,732 +99,11 @@
21799 *
218100 *************************************************************************/
219101
220
-static int efx_soft_enable_interrupts(struct efx_nic *efx);
221
-static void efx_soft_disable_interrupts(struct efx_nic *efx);
222
-static void efx_remove_channel(struct efx_channel *channel);
223
-static void efx_remove_channels(struct efx_nic *efx);
224
-static const struct efx_channel_type efx_default_channel_type;
225102 static void efx_remove_port(struct efx_nic *efx);
226
-static void efx_init_napi_channel(struct efx_channel *channel);
227
-static void efx_fini_napi(struct efx_nic *efx);
228
-static void efx_fini_napi_channel(struct efx_channel *channel);
229
-static void efx_fini_struct(struct efx_nic *efx);
230
-static void efx_start_all(struct efx_nic *efx);
231
-static void efx_stop_all(struct efx_nic *efx);
232
-
233
-#define EFX_ASSERT_RESET_SERIALISED(efx) \
234
- do { \
235
- if ((efx->state == STATE_READY) || \
236
- (efx->state == STATE_RECOVERY) || \
237
- (efx->state == STATE_DISABLED)) \
238
- ASSERT_RTNL(); \
239
- } while (0)
240
-
241
-static int efx_check_disabled(struct efx_nic *efx)
242
-{
243
- if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
244
- netif_err(efx, drv, efx->net_dev,
245
- "device is disabled due to earlier errors\n");
246
- return -EIO;
247
- }
248
- return 0;
249
-}
250
-
251
-/**************************************************************************
252
- *
253
- * Event queue processing
254
- *
255
- *************************************************************************/
256
-
257
-/* Process channel's event queue
258
- *
259
- * This function is responsible for processing the event queue of a
260
- * single channel. The caller must guarantee that this function will
261
- * never be concurrently called more than once on the same channel,
262
- * though different channels may be being processed concurrently.
263
- */
264
-static int efx_process_channel(struct efx_channel *channel, int budget)
265
-{
266
- struct efx_tx_queue *tx_queue;
267
- struct list_head rx_list;
268
- int spent;
269
-
270
- if (unlikely(!channel->enabled))
271
- return 0;
272
-
273
- /* Prepare the batch receive list */
274
- EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
275
- INIT_LIST_HEAD(&rx_list);
276
- channel->rx_list = &rx_list;
277
-
278
- efx_for_each_channel_tx_queue(tx_queue, channel) {
279
- tx_queue->pkts_compl = 0;
280
- tx_queue->bytes_compl = 0;
281
- }
282
-
283
- spent = efx_nic_process_eventq(channel, budget);
284
- if (spent && efx_channel_has_rx_queue(channel)) {
285
- struct efx_rx_queue *rx_queue =
286
- efx_channel_get_rx_queue(channel);
287
-
288
- efx_rx_flush_packet(channel);
289
- efx_fast_push_rx_descriptors(rx_queue, true);
290
- }
291
-
292
- /* Update BQL */
293
- efx_for_each_channel_tx_queue(tx_queue, channel) {
294
- if (tx_queue->bytes_compl) {
295
- netdev_tx_completed_queue(tx_queue->core_txq,
296
- tx_queue->pkts_compl, tx_queue->bytes_compl);
297
- }
298
- }
299
-
300
- /* Receive any packets we queued up */
301
- netif_receive_skb_list(channel->rx_list);
302
- channel->rx_list = NULL;
303
-
304
- return spent;
305
-}
306
-
307
-/* NAPI poll handler
308
- *
309
- * NAPI guarantees serialisation of polls of the same device, which
310
- * provides the guarantee required by efx_process_channel().
311
- */
312
-static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
313
-{
314
- int step = efx->irq_mod_step_us;
315
-
316
- if (channel->irq_mod_score < irq_adapt_low_thresh) {
317
- if (channel->irq_moderation_us > step) {
318
- channel->irq_moderation_us -= step;
319
- efx->type->push_irq_moderation(channel);
320
- }
321
- } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
322
- if (channel->irq_moderation_us <
323
- efx->irq_rx_moderation_us) {
324
- channel->irq_moderation_us += step;
325
- efx->type->push_irq_moderation(channel);
326
- }
327
- }
328
-
329
- channel->irq_count = 0;
330
- channel->irq_mod_score = 0;
331
-}
332
-
333
-static int efx_poll(struct napi_struct *napi, int budget)
334
-{
335
- struct efx_channel *channel =
336
- container_of(napi, struct efx_channel, napi_str);
337
- struct efx_nic *efx = channel->efx;
338
- int spent;
339
-
340
- netif_vdbg(efx, intr, efx->net_dev,
341
- "channel %d NAPI poll executing on CPU %d\n",
342
- channel->channel, raw_smp_processor_id());
343
-
344
- spent = efx_process_channel(channel, budget);
345
-
346
- if (spent < budget) {
347
- if (efx_channel_has_rx_queue(channel) &&
348
- efx->irq_rx_adaptive &&
349
- unlikely(++channel->irq_count == 1000)) {
350
- efx_update_irq_mod(efx, channel);
351
- }
352
-
353
-#ifdef CONFIG_RFS_ACCEL
354
- /* Perhaps expire some ARFS filters */
355
- schedule_work(&channel->filter_work);
356
-#endif
357
-
358
- /* There is no race here; although napi_disable() will
359
- * only wait for napi_complete(), this isn't a problem
360
- * since efx_nic_eventq_read_ack() will have no effect if
361
- * interrupts have already been disabled.
362
- */
363
- if (napi_complete_done(napi, spent))
364
- efx_nic_eventq_read_ack(channel);
365
- }
366
-
367
- return spent;
368
-}
369
-
370
-/* Create event queue
371
- * Event queue memory allocations are done only once. If the channel
372
- * is reset, the memory buffer will be reused; this guards against
373
- * errors during channel reset and also simplifies interrupt handling.
374
- */
375
-static int efx_probe_eventq(struct efx_channel *channel)
376
-{
377
- struct efx_nic *efx = channel->efx;
378
- unsigned long entries;
379
-
380
- netif_dbg(efx, probe, efx->net_dev,
381
- "chan %d create event queue\n", channel->channel);
382
-
383
- /* Build an event queue with room for one event per tx and rx buffer,
384
- * plus some extra for link state events and MCDI completions. */
385
- entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
386
- EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
387
- channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
388
-
389
- return efx_nic_probe_eventq(channel);
390
-}
391
-
392
-/* Prepare channel's event queue */
393
-static int efx_init_eventq(struct efx_channel *channel)
394
-{
395
- struct efx_nic *efx = channel->efx;
396
- int rc;
397
-
398
- EFX_WARN_ON_PARANOID(channel->eventq_init);
399
-
400
- netif_dbg(efx, drv, efx->net_dev,
401
- "chan %d init event queue\n", channel->channel);
402
-
403
- rc = efx_nic_init_eventq(channel);
404
- if (rc == 0) {
405
- efx->type->push_irq_moderation(channel);
406
- channel->eventq_read_ptr = 0;
407
- channel->eventq_init = true;
408
- }
409
- return rc;
410
-}
411
-
412
-/* Enable event queue processing and NAPI */
413
-void efx_start_eventq(struct efx_channel *channel)
414
-{
415
- netif_dbg(channel->efx, ifup, channel->efx->net_dev,
416
- "chan %d start event queue\n", channel->channel);
417
-
418
- /* Make sure the NAPI handler sees the enabled flag set */
419
- channel->enabled = true;
420
- smp_wmb();
421
-
422
- napi_enable(&channel->napi_str);
423
- efx_nic_eventq_read_ack(channel);
424
-}
425
-
426
-/* Disable event queue processing and NAPI */
427
-void efx_stop_eventq(struct efx_channel *channel)
428
-{
429
- if (!channel->enabled)
430
- return;
431
-
432
- napi_disable(&channel->napi_str);
433
- channel->enabled = false;
434
-}
435
-
436
-static void efx_fini_eventq(struct efx_channel *channel)
437
-{
438
- if (!channel->eventq_init)
439
- return;
440
-
441
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
442
- "chan %d fini event queue\n", channel->channel);
443
-
444
- efx_nic_fini_eventq(channel);
445
- channel->eventq_init = false;
446
-}
447
-
448
-static void efx_remove_eventq(struct efx_channel *channel)
449
-{
450
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
451
- "chan %d remove event queue\n", channel->channel);
452
-
453
- efx_nic_remove_eventq(channel);
454
-}
455
-
456
-/**************************************************************************
457
- *
458
- * Channel handling
459
- *
460
- *************************************************************************/
461
-
462
-/* Allocate and initialise a channel structure. */
463
-static struct efx_channel *
464
-efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
465
-{
466
- struct efx_channel *channel;
467
- struct efx_rx_queue *rx_queue;
468
- struct efx_tx_queue *tx_queue;
469
- int j;
470
-
471
- channel = kzalloc(sizeof(*channel), GFP_KERNEL);
472
- if (!channel)
473
- return NULL;
474
-
475
- channel->efx = efx;
476
- channel->channel = i;
477
- channel->type = &efx_default_channel_type;
478
-
479
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
480
- tx_queue = &channel->tx_queue[j];
481
- tx_queue->efx = efx;
482
- tx_queue->queue = i * EFX_TXQ_TYPES + j;
483
- tx_queue->channel = channel;
484
- }
485
-
486
-#ifdef CONFIG_RFS_ACCEL
487
- INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
488
-#endif
489
-
490
- rx_queue = &channel->rx_queue;
491
- rx_queue->efx = efx;
492
- timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
493
-
494
- return channel;
495
-}
496
-
497
-/* Allocate and initialise a channel structure, copying parameters
498
- * (but not resources) from an old channel structure.
499
- */
500
-static struct efx_channel *
501
-efx_copy_channel(const struct efx_channel *old_channel)
502
-{
503
- struct efx_channel *channel;
504
- struct efx_rx_queue *rx_queue;
505
- struct efx_tx_queue *tx_queue;
506
- int j;
507
-
508
- channel = kmalloc(sizeof(*channel), GFP_KERNEL);
509
- if (!channel)
510
- return NULL;
511
-
512
- *channel = *old_channel;
513
-
514
- channel->napi_dev = NULL;
515
- INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
516
- channel->napi_str.napi_id = 0;
517
- channel->napi_str.state = 0;
518
- memset(&channel->eventq, 0, sizeof(channel->eventq));
519
-
520
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
521
- tx_queue = &channel->tx_queue[j];
522
- if (tx_queue->channel)
523
- tx_queue->channel = channel;
524
- tx_queue->buffer = NULL;
525
- tx_queue->cb_page = NULL;
526
- memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
527
- }
528
-
529
- rx_queue = &channel->rx_queue;
530
- rx_queue->buffer = NULL;
531
- memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
532
- timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
533
-#ifdef CONFIG_RFS_ACCEL
534
- INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
535
-#endif
536
-
537
- return channel;
538
-}
539
-
540
-static int efx_probe_channel(struct efx_channel *channel)
541
-{
542
- struct efx_tx_queue *tx_queue;
543
- struct efx_rx_queue *rx_queue;
544
- int rc;
545
-
546
- netif_dbg(channel->efx, probe, channel->efx->net_dev,
547
- "creating channel %d\n", channel->channel);
548
-
549
- rc = channel->type->pre_probe(channel);
550
- if (rc)
551
- goto fail;
552
-
553
- rc = efx_probe_eventq(channel);
554
- if (rc)
555
- goto fail;
556
-
557
- efx_for_each_channel_tx_queue(tx_queue, channel) {
558
- rc = efx_probe_tx_queue(tx_queue);
559
- if (rc)
560
- goto fail;
561
- }
562
-
563
- efx_for_each_channel_rx_queue(rx_queue, channel) {
564
- rc = efx_probe_rx_queue(rx_queue);
565
- if (rc)
566
- goto fail;
567
- }
568
-
569
- channel->rx_list = NULL;
570
-
571
- return 0;
572
-
573
-fail:
574
- efx_remove_channel(channel);
575
- return rc;
576
-}
577
-
578
-static void
579
-efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
580
-{
581
- struct efx_nic *efx = channel->efx;
582
- const char *type;
583
- int number;
584
-
585
- number = channel->channel;
586
- if (efx->tx_channel_offset == 0) {
587
- type = "";
588
- } else if (channel->channel < efx->tx_channel_offset) {
589
- type = "-rx";
590
- } else {
591
- type = "-tx";
592
- number -= efx->tx_channel_offset;
593
- }
594
- snprintf(buf, len, "%s%s-%d", efx->name, type, number);
595
-}
596
-
597
-static void efx_set_channel_names(struct efx_nic *efx)
598
-{
599
- struct efx_channel *channel;
600
-
601
- efx_for_each_channel(channel, efx)
602
- channel->type->get_name(channel,
603
- efx->msi_context[channel->channel].name,
604
- sizeof(efx->msi_context[0].name));
605
-}
606
-
607
-static int efx_probe_channels(struct efx_nic *efx)
608
-{
609
- struct efx_channel *channel;
610
- int rc;
611
-
612
- /* Restart special buffer allocation */
613
- efx->next_buffer_table = 0;
614
-
615
- /* Probe channels in reverse, so that any 'extra' channels
616
- * use the start of the buffer table. This allows the traffic
617
- * channels to be resized without moving them or wasting the
618
- * entries before them.
619
- */
620
- efx_for_each_channel_rev(channel, efx) {
621
- rc = efx_probe_channel(channel);
622
- if (rc) {
623
- netif_err(efx, probe, efx->net_dev,
624
- "failed to create channel %d\n",
625
- channel->channel);
626
- goto fail;
627
- }
628
- }
629
- efx_set_channel_names(efx);
630
-
631
- return 0;
632
-
633
-fail:
634
- efx_remove_channels(efx);
635
- return rc;
636
-}
637
-
638
-/* Channels are shutdown and reinitialised whilst the NIC is running
639
- * to propagate configuration changes (mtu, checksum offload), or
640
- * to clear hardware error conditions
641
- */
642
-static void efx_start_datapath(struct efx_nic *efx)
643
-{
644
- netdev_features_t old_features = efx->net_dev->features;
645
- bool old_rx_scatter = efx->rx_scatter;
646
- struct efx_tx_queue *tx_queue;
647
- struct efx_rx_queue *rx_queue;
648
- struct efx_channel *channel;
649
- size_t rx_buf_len;
650
-
651
- /* Calculate the rx buffer allocation parameters required to
652
- * support the current MTU, including padding for header
653
- * alignment and overruns.
654
- */
655
- efx->rx_dma_len = (efx->rx_prefix_size +
656
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
657
- efx->type->rx_buffer_padding);
658
- rx_buf_len = (sizeof(struct efx_rx_page_state) +
659
- efx->rx_ip_align + efx->rx_dma_len);
660
- if (rx_buf_len <= PAGE_SIZE) {
661
- efx->rx_scatter = efx->type->always_rx_scatter;
662
- efx->rx_buffer_order = 0;
663
- } else if (efx->type->can_rx_scatter) {
664
- BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
665
- BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
666
- 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
667
- EFX_RX_BUF_ALIGNMENT) >
668
- PAGE_SIZE);
669
- efx->rx_scatter = true;
670
- efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
671
- efx->rx_buffer_order = 0;
672
- } else {
673
- efx->rx_scatter = false;
674
- efx->rx_buffer_order = get_order(rx_buf_len);
675
- }
676
-
677
- efx_rx_config_page_split(efx);
678
- if (efx->rx_buffer_order)
679
- netif_dbg(efx, drv, efx->net_dev,
680
- "RX buf len=%u; page order=%u batch=%u\n",
681
- efx->rx_dma_len, efx->rx_buffer_order,
682
- efx->rx_pages_per_batch);
683
- else
684
- netif_dbg(efx, drv, efx->net_dev,
685
- "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
686
- efx->rx_dma_len, efx->rx_page_buf_step,
687
- efx->rx_bufs_per_page, efx->rx_pages_per_batch);
688
-
689
- /* Restore previously fixed features in hw_features and remove
690
- * features which are fixed now
691
- */
692
- efx->net_dev->hw_features |= efx->net_dev->features;
693
- efx->net_dev->hw_features &= ~efx->fixed_features;
694
- efx->net_dev->features |= efx->fixed_features;
695
- if (efx->net_dev->features != old_features)
696
- netdev_features_change(efx->net_dev);
697
-
698
- /* RX filters may also have scatter-enabled flags */
699
- if (efx->rx_scatter != old_rx_scatter)
700
- efx->type->filter_update_rx_scatter(efx);
701
-
702
- /* We must keep at least one descriptor in a TX ring empty.
703
- * We could avoid this when the queue size does not exactly
704
- * match the hardware ring size, but it's not that important.
705
- * Therefore we stop the queue when one more skb might fill
706
- * the ring completely. We wake it when half way back to
707
- * empty.
708
- */
709
- efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
710
- efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
711
-
712
- /* Initialise the channels */
713
- efx_for_each_channel(channel, efx) {
714
- efx_for_each_channel_tx_queue(tx_queue, channel) {
715
- efx_init_tx_queue(tx_queue);
716
- atomic_inc(&efx->active_queues);
717
- }
718
-
719
- efx_for_each_channel_rx_queue(rx_queue, channel) {
720
- efx_init_rx_queue(rx_queue);
721
- atomic_inc(&efx->active_queues);
722
- efx_stop_eventq(channel);
723
- efx_fast_push_rx_descriptors(rx_queue, false);
724
- efx_start_eventq(channel);
725
- }
726
-
727
- WARN_ON(channel->rx_pkt_n_frags);
728
- }
729
-
730
- efx_ptp_start_datapath(efx);
731
-
732
- if (netif_device_present(efx->net_dev))
733
- netif_tx_wake_all_queues(efx->net_dev);
734
-}
735
-
736
-static void efx_stop_datapath(struct efx_nic *efx)
737
-{
738
- struct efx_channel *channel;
739
- struct efx_tx_queue *tx_queue;
740
- struct efx_rx_queue *rx_queue;
741
- int rc;
742
-
743
- EFX_ASSERT_RESET_SERIALISED(efx);
744
- BUG_ON(efx->port_enabled);
745
-
746
- efx_ptp_stop_datapath(efx);
747
-
748
- /* Stop RX refill */
749
- efx_for_each_channel(channel, efx) {
750
- efx_for_each_channel_rx_queue(rx_queue, channel)
751
- rx_queue->refill_enabled = false;
752
- }
753
-
754
- efx_for_each_channel(channel, efx) {
755
- /* RX packet processing is pipelined, so wait for the
756
- * NAPI handler to complete. At least event queue 0
757
- * might be kept active by non-data events, so don't
758
- * use napi_synchronize() but actually disable NAPI
759
- * temporarily.
760
- */
761
- if (efx_channel_has_rx_queue(channel)) {
762
- efx_stop_eventq(channel);
763
- efx_start_eventq(channel);
764
- }
765
- }
766
-
767
- rc = efx->type->fini_dmaq(efx);
768
- if (rc) {
769
- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
770
- } else {
771
- netif_dbg(efx, drv, efx->net_dev,
772
- "successfully flushed all queues\n");
773
- }
774
-
775
- efx_for_each_channel(channel, efx) {
776
- efx_for_each_channel_rx_queue(rx_queue, channel)
777
- efx_fini_rx_queue(rx_queue);
778
- efx_for_each_possible_channel_tx_queue(tx_queue, channel)
779
- efx_fini_tx_queue(tx_queue);
780
- }
781
-}
782
-
783
-static void efx_remove_channel(struct efx_channel *channel)
784
-{
785
- struct efx_tx_queue *tx_queue;
786
- struct efx_rx_queue *rx_queue;
787
-
788
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
789
- "destroy chan %d\n", channel->channel);
790
-
791
- efx_for_each_channel_rx_queue(rx_queue, channel)
792
- efx_remove_rx_queue(rx_queue);
793
- efx_for_each_possible_channel_tx_queue(tx_queue, channel)
794
- efx_remove_tx_queue(tx_queue);
795
- efx_remove_eventq(channel);
796
- channel->type->post_remove(channel);
797
-}
798
-
799
-static void efx_remove_channels(struct efx_nic *efx)
800
-{
801
- struct efx_channel *channel;
802
-
803
- efx_for_each_channel(channel, efx)
804
- efx_remove_channel(channel);
805
-}
806
-
807
-int
808
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
809
-{
810
- struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
811
- u32 old_rxq_entries, old_txq_entries;
812
- unsigned i, next_buffer_table = 0;
813
- int rc, rc2;
814
-
815
- rc = efx_check_disabled(efx);
816
- if (rc)
817
- return rc;
818
-
819
- /* Not all channels should be reallocated. We must avoid
820
- * reallocating their buffer table entries.
821
- */
822
- efx_for_each_channel(channel, efx) {
823
- struct efx_rx_queue *rx_queue;
824
- struct efx_tx_queue *tx_queue;
825
-
826
- if (channel->type->copy)
827
- continue;
828
- next_buffer_table = max(next_buffer_table,
829
- channel->eventq.index +
830
- channel->eventq.entries);
831
- efx_for_each_channel_rx_queue(rx_queue, channel)
832
- next_buffer_table = max(next_buffer_table,
833
- rx_queue->rxd.index +
834
- rx_queue->rxd.entries);
835
- efx_for_each_channel_tx_queue(tx_queue, channel)
836
- next_buffer_table = max(next_buffer_table,
837
- tx_queue->txd.index +
838
- tx_queue->txd.entries);
839
- }
840
-
841
- efx_device_detach_sync(efx);
842
- efx_stop_all(efx);
843
- efx_soft_disable_interrupts(efx);
844
-
845
- /* Clone channels (where possible) */
846
- memset(other_channel, 0, sizeof(other_channel));
847
- for (i = 0; i < efx->n_channels; i++) {
848
- channel = efx->channel[i];
849
- if (channel->type->copy)
850
- channel = channel->type->copy(channel);
851
- if (!channel) {
852
- rc = -ENOMEM;
853
- goto out;
854
- }
855
- other_channel[i] = channel;
856
- }
857
-
858
- /* Swap entry counts and channel pointers */
859
- old_rxq_entries = efx->rxq_entries;
860
- old_txq_entries = efx->txq_entries;
861
- efx->rxq_entries = rxq_entries;
862
- efx->txq_entries = txq_entries;
863
- for (i = 0; i < efx->n_channels; i++) {
864
- channel = efx->channel[i];
865
- efx->channel[i] = other_channel[i];
866
- other_channel[i] = channel;
867
- }
868
-
869
- /* Restart buffer table allocation */
870
- efx->next_buffer_table = next_buffer_table;
871
-
872
- for (i = 0; i < efx->n_channels; i++) {
873
- channel = efx->channel[i];
874
- if (!channel->type->copy)
875
- continue;
876
- rc = efx_probe_channel(channel);
877
- if (rc)
878
- goto rollback;
879
- efx_init_napi_channel(efx->channel[i]);
880
- }
881
-
882
-out:
883
- /* Destroy unused channel structures */
884
- for (i = 0; i < efx->n_channels; i++) {
885
- channel = other_channel[i];
886
- if (channel && channel->type->copy) {
887
- efx_fini_napi_channel(channel);
888
- efx_remove_channel(channel);
889
- kfree(channel);
890
- }
891
- }
892
-
893
- rc2 = efx_soft_enable_interrupts(efx);
894
- if (rc2) {
895
- rc = rc ? rc : rc2;
896
- netif_err(efx, drv, efx->net_dev,
897
- "unable to restart interrupts on channel reallocation\n");
898
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
899
- } else {
900
- efx_start_all(efx);
901
- efx_device_attach_if_not_resetting(efx);
902
- }
903
- return rc;
904
-
905
-rollback:
906
- /* Swap back */
907
- efx->rxq_entries = old_rxq_entries;
908
- efx->txq_entries = old_txq_entries;
909
- for (i = 0; i < efx->n_channels; i++) {
910
- channel = efx->channel[i];
911
- efx->channel[i] = other_channel[i];
912
- other_channel[i] = channel;
913
- }
914
- goto out;
915
-}
916
-
917
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
918
-{
919
- mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
920
-}
921
-
922
-static bool efx_default_channel_want_txqs(struct efx_channel *channel)
923
-{
924
- return channel->channel - channel->efx->tx_channel_offset <
925
- channel->efx->n_tx_channels;
926
-}
927
-
928
-static const struct efx_channel_type efx_default_channel_type = {
929
- .pre_probe = efx_channel_dummy_op_int,
930
- .post_remove = efx_channel_dummy_op_void,
931
- .get_name = efx_get_channel_name,
932
- .copy = efx_copy_channel,
933
- .want_txqs = efx_default_channel_want_txqs,
934
- .keep_eventq = false,
935
- .want_pio = true,
936
-};
937
-
938
-int efx_channel_dummy_op_int(struct efx_channel *channel)
939
-{
940
- return 0;
941
-}
942
-
943
-void efx_channel_dummy_op_void(struct efx_channel *channel)
944
-{
945
-}
103
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
104
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
105
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
106
+ u32 flags);
946107
947108 /**************************************************************************
948109 *
@@ -950,147 +111,7 @@
950111 *
951112 **************************************************************************/
952113
953
-/* This ensures that the kernel is kept informed (via
954
- * netif_carrier_on/off) of the link status, and also maintains the
955
- * link status's stop on the port's TX queue.
956
- */
957
-void efx_link_status_changed(struct efx_nic *efx)
958
-{
959
- struct efx_link_state *link_state = &efx->link_state;
960
-
961
- /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
962
- * that no events are triggered between unregister_netdev() and the
963
- * driver unloading. A more general condition is that NETDEV_CHANGE
964
- * can only be generated between NETDEV_UP and NETDEV_DOWN */
965
- if (!netif_running(efx->net_dev))
966
- return;
967
-
968
- if (link_state->up != netif_carrier_ok(efx->net_dev)) {
969
- efx->n_link_state_changes++;
970
-
971
- if (link_state->up)
972
- netif_carrier_on(efx->net_dev);
973
- else
974
- netif_carrier_off(efx->net_dev);
975
- }
976
-
977
- /* Status message for kernel log */
978
- if (link_state->up)
979
- netif_info(efx, link, efx->net_dev,
980
- "link up at %uMbps %s-duplex (MTU %d)\n",
981
- link_state->speed, link_state->fd ? "full" : "half",
982
- efx->net_dev->mtu);
983
- else
984
- netif_info(efx, link, efx->net_dev, "link down\n");
985
-}
986
-
987
-void efx_link_set_advertising(struct efx_nic *efx,
988
- const unsigned long *advertising)
989
-{
990
- memcpy(efx->link_advertising, advertising,
991
- sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
992
-
993
- efx->link_advertising[0] |= ADVERTISED_Autoneg;
994
- if (advertising[0] & ADVERTISED_Pause)
995
- efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
996
- else
997
- efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
998
- if (advertising[0] & ADVERTISED_Asym_Pause)
999
- efx->wanted_fc ^= EFX_FC_TX;
1000
-}
1001
-
1002
-/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
1003
- * force the Autoneg bit on.
1004
- */
1005
-void efx_link_clear_advertising(struct efx_nic *efx)
1006
-{
1007
- bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
1008
- efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
1009
-}
1010
-
1011
-void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
1012
-{
1013
- efx->wanted_fc = wanted_fc;
1014
- if (efx->link_advertising[0]) {
1015
- if (wanted_fc & EFX_FC_RX)
1016
- efx->link_advertising[0] |= (ADVERTISED_Pause |
1017
- ADVERTISED_Asym_Pause);
1018
- else
1019
- efx->link_advertising[0] &= ~(ADVERTISED_Pause |
1020
- ADVERTISED_Asym_Pause);
1021
- if (wanted_fc & EFX_FC_TX)
1022
- efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
1023
- }
1024
-}
1025
-
1026114 static void efx_fini_port(struct efx_nic *efx);
1027
-
1028
-/* We assume that efx->type->reconfigure_mac will always try to sync RX
1029
- * filters and therefore needs to read-lock the filter table against freeing
1030
- */
1031
-void efx_mac_reconfigure(struct efx_nic *efx)
1032
-{
1033
- down_read(&efx->filter_sem);
1034
- efx->type->reconfigure_mac(efx);
1035
- up_read(&efx->filter_sem);
1036
-}
1037
-
1038
-/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
1039
- * the MAC appropriately. All other PHY configuration changes are pushed
1040
- * through phy_op->set_settings(), and pushed asynchronously to the MAC
1041
- * through efx_monitor().
1042
- *
1043
- * Callers must hold the mac_lock
1044
- */
1045
-int __efx_reconfigure_port(struct efx_nic *efx)
1046
-{
1047
- enum efx_phy_mode phy_mode;
1048
- int rc;
1049
-
1050
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
1051
-
1052
- /* Disable PHY transmit in mac level loopbacks */
1053
- phy_mode = efx->phy_mode;
1054
- if (LOOPBACK_INTERNAL(efx))
1055
- efx->phy_mode |= PHY_MODE_TX_DISABLED;
1056
- else
1057
- efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
1058
-
1059
- rc = efx->type->reconfigure_port(efx);
1060
-
1061
- if (rc)
1062
- efx->phy_mode = phy_mode;
1063
-
1064
- return rc;
1065
-}
1066
-
1067
-/* Reinitialise the MAC to pick up new PHY settings, even if the port is
1068
- * disabled. */
1069
-int efx_reconfigure_port(struct efx_nic *efx)
1070
-{
1071
- int rc;
1072
-
1073
- EFX_ASSERT_RESET_SERIALISED(efx);
1074
-
1075
- mutex_lock(&efx->mac_lock);
1076
- rc = __efx_reconfigure_port(efx);
1077
- mutex_unlock(&efx->mac_lock);
1078
-
1079
- return rc;
1080
-}
1081
-
1082
-/* Asynchronous work item for changing MAC promiscuity and multicast
1083
- * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
1084
- * MAC directly. */
1085
-static void efx_mac_work(struct work_struct *data)
1086
-{
1087
- struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
1088
-
1089
- mutex_lock(&efx->mac_lock);
1090
- if (efx->port_enabled)
1091
- efx_mac_reconfigure(efx);
1092
- mutex_unlock(&efx->mac_lock);
1093
-}
1094115
1095116 static int efx_probe_port(struct efx_nic *efx)
1096117 {
@@ -1120,67 +141,19 @@
1120141
1121142 mutex_lock(&efx->mac_lock);
1122143
1123
- rc = efx->phy_op->init(efx);
1124
- if (rc)
1125
- goto fail1;
1126
-
1127144 efx->port_initialized = true;
1128145
1129
- /* Reconfigure the MAC before creating dma queues (required for
1130
- * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
1131
- efx_mac_reconfigure(efx);
1132
-
1133146 /* Ensure the PHY advertises the correct flow control settings */
1134
- rc = efx->phy_op->reconfigure(efx);
147
+ rc = efx_mcdi_port_reconfigure(efx);
1135148 if (rc && rc != -EPERM)
1136
- goto fail2;
149
+ goto fail;
1137150
1138151 mutex_unlock(&efx->mac_lock);
1139152 return 0;
1140153
1141
-fail2:
1142
- efx->phy_op->fini(efx);
1143
-fail1:
154
+fail:
1144155 mutex_unlock(&efx->mac_lock);
1145156 return rc;
1146
-}
1147
-
1148
-static void efx_start_port(struct efx_nic *efx)
1149
-{
1150
- netif_dbg(efx, ifup, efx->net_dev, "start port\n");
1151
- BUG_ON(efx->port_enabled);
1152
-
1153
- mutex_lock(&efx->mac_lock);
1154
- efx->port_enabled = true;
1155
-
1156
- /* Ensure MAC ingress/egress is enabled */
1157
- efx_mac_reconfigure(efx);
1158
-
1159
- mutex_unlock(&efx->mac_lock);
1160
-}
1161
-
1162
-/* Cancel work for MAC reconfiguration, periodic hardware monitoring
1163
- * and the async self-test, wait for them to finish and prevent them
1164
- * being scheduled again. This doesn't cover online resets, which
1165
- * should only be cancelled when removing the device.
1166
- */
1167
-static void efx_stop_port(struct efx_nic *efx)
1168
-{
1169
- netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
1170
-
1171
- EFX_ASSERT_RESET_SERIALISED(efx);
1172
-
1173
- mutex_lock(&efx->mac_lock);
1174
- efx->port_enabled = false;
1175
- mutex_unlock(&efx->mac_lock);
1176
-
1177
- /* Serialise against efx_set_multicast_list() */
1178
- netif_addr_lock_bh(efx->net_dev);
1179
- netif_addr_unlock_bh(efx->net_dev);
1180
-
1181
- cancel_delayed_work_sync(&efx->monitor_work);
1182
- efx_selftest_async_cancel(efx);
1183
- cancel_work_sync(&efx->mac_work);
1184157 }
1185158
1186159 static void efx_fini_port(struct efx_nic *efx)
@@ -1190,7 +163,6 @@
1190163 if (!efx->port_initialized)
1191164 return;
1192165
1193
- efx->phy_op->fini(efx);
1194166 efx->port_initialized = false;
1195167
1196168 efx->link_state.up = false;
@@ -1281,478 +253,6 @@
1281253 }
1282254 }
1283255
1284
-/* This configures the PCI device to enable I/O and DMA. */
1285
-static int efx_init_io(struct efx_nic *efx)
1286
-{
1287
- struct pci_dev *pci_dev = efx->pci_dev;
1288
- dma_addr_t dma_mask = efx->type->max_dma_mask;
1289
- unsigned int mem_map_size = efx->type->mem_map_size(efx);
1290
- int rc, bar;
1291
-
1292
- netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
1293
-
1294
- bar = efx->type->mem_bar(efx);
1295
-
1296
- rc = pci_enable_device(pci_dev);
1297
- if (rc) {
1298
- netif_err(efx, probe, efx->net_dev,
1299
- "failed to enable PCI device\n");
1300
- goto fail1;
1301
- }
1302
-
1303
- pci_set_master(pci_dev);
1304
-
1305
- /* Set the PCI DMA mask. Try all possibilities from our genuine mask
1306
- * down to 32 bits, because some architectures will allow 40 bit
1307
- * masks event though they reject 46 bit masks.
1308
- */
1309
- while (dma_mask > 0x7fffffffUL) {
1310
- rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
1311
- if (rc == 0)
1312
- break;
1313
- dma_mask >>= 1;
1314
- }
1315
- if (rc) {
1316
- netif_err(efx, probe, efx->net_dev,
1317
- "could not find a suitable DMA mask\n");
1318
- goto fail2;
1319
- }
1320
- netif_dbg(efx, probe, efx->net_dev,
1321
- "using DMA mask %llx\n", (unsigned long long) dma_mask);
1322
-
1323
- efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
1324
- rc = pci_request_region(pci_dev, bar, "sfc");
1325
- if (rc) {
1326
- netif_err(efx, probe, efx->net_dev,
1327
- "request for memory BAR failed\n");
1328
- rc = -EIO;
1329
- goto fail3;
1330
- }
1331
- efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
1332
- if (!efx->membase) {
1333
- netif_err(efx, probe, efx->net_dev,
1334
- "could not map memory BAR at %llx+%x\n",
1335
- (unsigned long long)efx->membase_phys, mem_map_size);
1336
- rc = -ENOMEM;
1337
- goto fail4;
1338
- }
1339
- netif_dbg(efx, probe, efx->net_dev,
1340
- "memory BAR at %llx+%x (virtual %p)\n",
1341
- (unsigned long long)efx->membase_phys, mem_map_size,
1342
- efx->membase);
1343
-
1344
- return 0;
1345
-
1346
- fail4:
1347
- pci_release_region(efx->pci_dev, bar);
1348
- fail3:
1349
- efx->membase_phys = 0;
1350
- fail2:
1351
- pci_disable_device(efx->pci_dev);
1352
- fail1:
1353
- return rc;
1354
-}
1355
-
1356
-static void efx_fini_io(struct efx_nic *efx)
1357
-{
1358
- int bar;
1359
-
1360
- netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
1361
-
1362
- if (efx->membase) {
1363
- iounmap(efx->membase);
1364
- efx->membase = NULL;
1365
- }
1366
-
1367
- if (efx->membase_phys) {
1368
- bar = efx->type->mem_bar(efx);
1369
- pci_release_region(efx->pci_dev, bar);
1370
- efx->membase_phys = 0;
1371
- }
1372
-
1373
- /* Don't disable bus-mastering if VFs are assigned */
1374
- if (!pci_vfs_assigned(efx->pci_dev))
1375
- pci_disable_device(efx->pci_dev);
1376
-}
1377
-
1378
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
1379
- struct efx_rss_context *ctx)
1380
-{
1381
- size_t i;
1382
-
1383
- for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
1384
- ctx->rx_indir_table[i] =
1385
- ethtool_rxfh_indir_default(i, efx->rss_spread);
1386
-}
1387
-
1388
-static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
1389
-{
1390
- cpumask_var_t thread_mask;
1391
- unsigned int count;
1392
- int cpu;
1393
-
1394
- if (rss_cpus) {
1395
- count = rss_cpus;
1396
- } else {
1397
- if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
1398
- netif_warn(efx, probe, efx->net_dev,
1399
- "RSS disabled due to allocation failure\n");
1400
- return 1;
1401
- }
1402
-
1403
- count = 0;
1404
- for_each_online_cpu(cpu) {
1405
- if (!cpumask_test_cpu(cpu, thread_mask)) {
1406
- ++count;
1407
- cpumask_or(thread_mask, thread_mask,
1408
- topology_sibling_cpumask(cpu));
1409
- }
1410
- }
1411
-
1412
- free_cpumask_var(thread_mask);
1413
- }
1414
-
1415
- if (count > EFX_MAX_RX_QUEUES) {
1416
- netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
1417
- "Reducing number of rx queues from %u to %u.\n",
1418
- count, EFX_MAX_RX_QUEUES);
1419
- count = EFX_MAX_RX_QUEUES;
1420
- }
1421
-
1422
- /* If RSS is requested for the PF *and* VFs then we can't write RSS
1423
- * table entries that are inaccessible to VFs
1424
- */
1425
-#ifdef CONFIG_SFC_SRIOV
1426
- if (efx->type->sriov_wanted) {
1427
- if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
1428
- count > efx_vf_size(efx)) {
1429
- netif_warn(efx, probe, efx->net_dev,
1430
- "Reducing number of RSS channels from %u to %u for "
1431
- "VF support. Increase vf-msix-limit to use more "
1432
- "channels on the PF.\n",
1433
- count, efx_vf_size(efx));
1434
- count = efx_vf_size(efx);
1435
- }
1436
- }
1437
-#endif
1438
-
1439
- return count;
1440
-}
1441
-
1442
-/* Probe the number and type of interrupts we are able to obtain, and
1443
- * the resulting numbers of channels and RX queues.
1444
- */
1445
-static int efx_probe_interrupts(struct efx_nic *efx)
1446
-{
1447
- unsigned int extra_channels = 0;
1448
- unsigned int i, j;
1449
- int rc;
1450
-
1451
- for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
1452
- if (efx->extra_channel_type[i])
1453
- ++extra_channels;
1454
-
1455
- if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
1456
- struct msix_entry xentries[EFX_MAX_CHANNELS];
1457
- unsigned int n_channels;
1458
-
1459
- n_channels = efx_wanted_parallelism(efx);
1460
- if (efx_separate_tx_channels)
1461
- n_channels *= 2;
1462
- n_channels += extra_channels;
1463
- n_channels = min(n_channels, efx->max_channels);
1464
-
1465
- for (i = 0; i < n_channels; i++)
1466
- xentries[i].entry = i;
1467
- rc = pci_enable_msix_range(efx->pci_dev,
1468
- xentries, 1, n_channels);
1469
- if (rc < 0) {
1470
- /* Fall back to single channel MSI */
1471
- netif_err(efx, drv, efx->net_dev,
1472
- "could not enable MSI-X\n");
1473
- if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
1474
- efx->interrupt_mode = EFX_INT_MODE_MSI;
1475
- else
1476
- return rc;
1477
- } else if (rc < n_channels) {
1478
- netif_err(efx, drv, efx->net_dev,
1479
- "WARNING: Insufficient MSI-X vectors"
1480
- " available (%d < %u).\n", rc, n_channels);
1481
- netif_err(efx, drv, efx->net_dev,
1482
- "WARNING: Performance may be reduced.\n");
1483
- n_channels = rc;
1484
- }
1485
-
1486
- if (rc > 0) {
1487
- efx->n_channels = n_channels;
1488
- if (n_channels > extra_channels)
1489
- n_channels -= extra_channels;
1490
- if (efx_separate_tx_channels) {
1491
- efx->n_tx_channels = min(max(n_channels / 2,
1492
- 1U),
1493
- efx->max_tx_channels);
1494
- efx->n_rx_channels = max(n_channels -
1495
- efx->n_tx_channels,
1496
- 1U);
1497
- } else {
1498
- efx->n_tx_channels = min(n_channels,
1499
- efx->max_tx_channels);
1500
- efx->n_rx_channels = n_channels;
1501
- }
1502
- for (i = 0; i < efx->n_channels; i++)
1503
- efx_get_channel(efx, i)->irq =
1504
- xentries[i].vector;
1505
- }
1506
- }
1507
-
1508
- /* Try single interrupt MSI */
1509
- if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
1510
- efx->n_channels = 1;
1511
- efx->n_rx_channels = 1;
1512
- efx->n_tx_channels = 1;
1513
- rc = pci_enable_msi(efx->pci_dev);
1514
- if (rc == 0) {
1515
- efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
1516
- } else {
1517
- netif_err(efx, drv, efx->net_dev,
1518
- "could not enable MSI\n");
1519
- if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
1520
- efx->interrupt_mode = EFX_INT_MODE_LEGACY;
1521
- else
1522
- return rc;
1523
- }
1524
- }
1525
-
1526
- /* Assume legacy interrupts */
1527
- if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
1528
- efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
1529
- efx->n_rx_channels = 1;
1530
- efx->n_tx_channels = 1;
1531
- efx->legacy_irq = efx->pci_dev->irq;
1532
- }
1533
-
1534
- /* Assign extra channels if possible */
1535
- efx->n_extra_tx_channels = 0;
1536
- j = efx->n_channels;
1537
- for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
1538
- if (!efx->extra_channel_type[i])
1539
- continue;
1540
- if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
1541
- efx->n_channels <= extra_channels) {
1542
- efx->extra_channel_type[i]->handle_no_channel(efx);
1543
- } else {
1544
- --j;
1545
- efx_get_channel(efx, j)->type =
1546
- efx->extra_channel_type[i];
1547
- if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
1548
- efx->n_extra_tx_channels++;
1549
- }
1550
- }
1551
-
1552
- /* RSS might be usable on VFs even if it is disabled on the PF */
1553
-#ifdef CONFIG_SFC_SRIOV
1554
- if (efx->type->sriov_wanted) {
1555
- efx->rss_spread = ((efx->n_rx_channels > 1 ||
1556
- !efx->type->sriov_wanted(efx)) ?
1557
- efx->n_rx_channels : efx_vf_size(efx));
1558
- return 0;
1559
- }
1560
-#endif
1561
- efx->rss_spread = efx->n_rx_channels;
1562
-
1563
- return 0;
1564
-}
1565
-
1566
-#if defined(CONFIG_SMP)
1567
-static void efx_set_interrupt_affinity(struct efx_nic *efx)
1568
-{
1569
- struct efx_channel *channel;
1570
- unsigned int cpu;
1571
-
1572
- efx_for_each_channel(channel, efx) {
1573
- cpu = cpumask_local_spread(channel->channel,
1574
- pcibus_to_node(efx->pci_dev->bus));
1575
- irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
1576
- }
1577
-}
1578
-
1579
-static void efx_clear_interrupt_affinity(struct efx_nic *efx)
1580
-{
1581
- struct efx_channel *channel;
1582
-
1583
- efx_for_each_channel(channel, efx)
1584
- irq_set_affinity_hint(channel->irq, NULL);
1585
-}
1586
-#else
1587
-static void
1588
-efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
1589
-{
1590
-}
1591
-
1592
-static void
1593
-efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
1594
-{
1595
-}
1596
-#endif /* CONFIG_SMP */
1597
-
1598
-static int efx_soft_enable_interrupts(struct efx_nic *efx)
1599
-{
1600
- struct efx_channel *channel, *end_channel;
1601
- int rc;
1602
-
1603
- BUG_ON(efx->state == STATE_DISABLED);
1604
-
1605
- efx->irq_soft_enabled = true;
1606
- smp_wmb();
1607
-
1608
- efx_for_each_channel(channel, efx) {
1609
- if (!channel->type->keep_eventq) {
1610
- rc = efx_init_eventq(channel);
1611
- if (rc)
1612
- goto fail;
1613
- }
1614
- efx_start_eventq(channel);
1615
- }
1616
-
1617
- efx_mcdi_mode_event(efx);
1618
-
1619
- return 0;
1620
-fail:
1621
- end_channel = channel;
1622
- efx_for_each_channel(channel, efx) {
1623
- if (channel == end_channel)
1624
- break;
1625
- efx_stop_eventq(channel);
1626
- if (!channel->type->keep_eventq)
1627
- efx_fini_eventq(channel);
1628
- }
1629
-
1630
- return rc;
1631
-}
1632
-
1633
-static void efx_soft_disable_interrupts(struct efx_nic *efx)
1634
-{
1635
- struct efx_channel *channel;
1636
-
1637
- if (efx->state == STATE_DISABLED)
1638
- return;
1639
-
1640
- efx_mcdi_mode_poll(efx);
1641
-
1642
- efx->irq_soft_enabled = false;
1643
- smp_wmb();
1644
-
1645
- if (efx->legacy_irq)
1646
- synchronize_irq(efx->legacy_irq);
1647
-
1648
- efx_for_each_channel(channel, efx) {
1649
- if (channel->irq)
1650
- synchronize_irq(channel->irq);
1651
-
1652
- efx_stop_eventq(channel);
1653
- if (!channel->type->keep_eventq)
1654
- efx_fini_eventq(channel);
1655
- }
1656
-
1657
- /* Flush the asynchronous MCDI request queue */
1658
- efx_mcdi_flush_async(efx);
1659
-}
1660
-
1661
-static int efx_enable_interrupts(struct efx_nic *efx)
1662
-{
1663
- struct efx_channel *channel, *end_channel;
1664
- int rc;
1665
-
1666
- BUG_ON(efx->state == STATE_DISABLED);
1667
-
1668
- if (efx->eeh_disabled_legacy_irq) {
1669
- enable_irq(efx->legacy_irq);
1670
- efx->eeh_disabled_legacy_irq = false;
1671
- }
1672
-
1673
- efx->type->irq_enable_master(efx);
1674
-
1675
- efx_for_each_channel(channel, efx) {
1676
- if (channel->type->keep_eventq) {
1677
- rc = efx_init_eventq(channel);
1678
- if (rc)
1679
- goto fail;
1680
- }
1681
- }
1682
-
1683
- rc = efx_soft_enable_interrupts(efx);
1684
- if (rc)
1685
- goto fail;
1686
-
1687
- return 0;
1688
-
1689
-fail:
1690
- end_channel = channel;
1691
- efx_for_each_channel(channel, efx) {
1692
- if (channel == end_channel)
1693
- break;
1694
- if (channel->type->keep_eventq)
1695
- efx_fini_eventq(channel);
1696
- }
1697
-
1698
- efx->type->irq_disable_non_ev(efx);
1699
-
1700
- return rc;
1701
-}
1702
-
1703
-static void efx_disable_interrupts(struct efx_nic *efx)
1704
-{
1705
- struct efx_channel *channel;
1706
-
1707
- efx_soft_disable_interrupts(efx);
1708
-
1709
- efx_for_each_channel(channel, efx) {
1710
- if (channel->type->keep_eventq)
1711
- efx_fini_eventq(channel);
1712
- }
1713
-
1714
- efx->type->irq_disable_non_ev(efx);
1715
-}
1716
-
1717
-static void efx_remove_interrupts(struct efx_nic *efx)
1718
-{
1719
- struct efx_channel *channel;
1720
-
1721
- /* Remove MSI/MSI-X interrupts */
1722
- efx_for_each_channel(channel, efx)
1723
- channel->irq = 0;
1724
- pci_disable_msi(efx->pci_dev);
1725
- pci_disable_msix(efx->pci_dev);
1726
-
1727
- /* Remove legacy interrupt */
1728
- efx->legacy_irq = 0;
1729
-}
1730
-
1731
-static void efx_set_channels(struct efx_nic *efx)
1732
-{
1733
- struct efx_channel *channel;
1734
- struct efx_tx_queue *tx_queue;
1735
-
1736
- efx->tx_channel_offset =
1737
- efx_separate_tx_channels ?
1738
- efx->n_channels - efx->n_tx_channels : 0;
1739
-
1740
- /* We need to mark which channels really have RX and TX
1741
- * queues, and adjust the TX queue numbers if we have separate
1742
- * RX-only and TX-only channels.
1743
- */
1744
- efx_for_each_channel(channel, efx) {
1745
- if (channel->channel < efx->n_rx_channels)
1746
- channel->rx_queue.core_index = channel->channel;
1747
- else
1748
- channel->rx_queue.core_index = -1;
1749
-
1750
- efx_for_each_channel_tx_queue(tx_queue, channel)
1751
- tx_queue->queue -= (efx->tx_channel_offset *
1752
- EFX_TXQ_TYPES);
1753
- }
1754
-}
1755
-
1756256 static int efx_probe_nic(struct efx_nic *efx)
1757257 {
1758258 int rc;
@@ -1780,7 +280,9 @@
1780280 if (rc)
1781281 goto fail1;
1782282
1783
- efx_set_channels(efx);
283
+ rc = efx_set_channels(efx);
284
+ if (rc)
285
+ goto fail1;
1784286
1785287 /* dimension_resources can fail with EAGAIN */
1786288 rc = efx->type->dimension_resources(efx);
@@ -1797,9 +299,6 @@
1797299 netdev_rss_key_fill(efx->rss_context.rx_hash_key,
1798300 sizeof(efx->rss_context.rx_hash_key));
1799301 efx_set_default_rx_indir_table(efx, &efx->rss_context);
1800
-
1801
- netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
1802
- netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1803302
1804303 /* Initialise the interrupt moderation settings */
1805304 efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
@@ -1822,68 +321,6 @@
1822321 efx_remove_interrupts(efx);
1823322 efx->type->remove(efx);
1824323 }
1825
-
1826
-static int efx_probe_filters(struct efx_nic *efx)
1827
-{
1828
- int rc;
1829
-
1830
- init_rwsem(&efx->filter_sem);
1831
- mutex_lock(&efx->mac_lock);
1832
- down_write(&efx->filter_sem);
1833
- rc = efx->type->filter_table_probe(efx);
1834
- if (rc)
1835
- goto out_unlock;
1836
-
1837
-#ifdef CONFIG_RFS_ACCEL
1838
- if (efx->type->offload_features & NETIF_F_NTUPLE) {
1839
- struct efx_channel *channel;
1840
- int i, success = 1;
1841
-
1842
- efx_for_each_channel(channel, efx) {
1843
- channel->rps_flow_id =
1844
- kcalloc(efx->type->max_rx_ip_filters,
1845
- sizeof(*channel->rps_flow_id),
1846
- GFP_KERNEL);
1847
- if (!channel->rps_flow_id)
1848
- success = 0;
1849
- else
1850
- for (i = 0;
1851
- i < efx->type->max_rx_ip_filters;
1852
- ++i)
1853
- channel->rps_flow_id[i] =
1854
- RPS_FLOW_ID_INVALID;
1855
- }
1856
-
1857
- if (!success) {
1858
- efx_for_each_channel(channel, efx)
1859
- kfree(channel->rps_flow_id);
1860
- efx->type->filter_table_remove(efx);
1861
- rc = -ENOMEM;
1862
- goto out_unlock;
1863
- }
1864
-
1865
- efx->rps_expire_index = efx->rps_expire_channel = 0;
1866
- }
1867
-#endif
1868
-out_unlock:
1869
- up_write(&efx->filter_sem);
1870
- mutex_unlock(&efx->mac_lock);
1871
- return rc;
1872
-}
1873
-
1874
-static void efx_remove_filters(struct efx_nic *efx)
1875
-{
1876
-#ifdef CONFIG_RFS_ACCEL
1877
- struct efx_channel *channel;
1878
-
1879
- efx_for_each_channel(channel, efx)
1880
- kfree(channel->rps_flow_id);
1881
-#endif
1882
- down_write(&efx->filter_sem);
1883
- efx->type->filter_table_remove(efx);
1884
- up_write(&efx->filter_sem);
1885
-}
1886
-
1887324
1888325 /**************************************************************************
1889326 *
@@ -1912,7 +349,6 @@
1912349 rc = -EINVAL;
1913350 goto fail3;
1914351 }
1915
- efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
1916352
1917353 #ifdef CONFIG_SFC_SRIOV
1918354 rc = efx->type->vswitching_probe(efx);
@@ -1933,6 +369,8 @@
1933369 if (rc)
1934370 goto fail5;
1935371
372
+ efx->state = STATE_NET_DOWN;
373
+
1936374 return 0;
1937375
1938376 fail5:
@@ -1949,83 +387,12 @@
1949387 return rc;
1950388 }
1951389
1952
-/* If the interface is supposed to be running but is not, start
1953
- * the hardware and software data path, regular activity for the port
1954
- * (MAC statistics, link polling, etc.) and schedule the port to be
1955
- * reconfigured. Interrupts must already be enabled. This function
1956
- * is safe to call multiple times, so long as the NIC is not disabled.
1957
- * Requires the RTNL lock.
1958
- */
1959
-static void efx_start_all(struct efx_nic *efx)
1960
-{
1961
- EFX_ASSERT_RESET_SERIALISED(efx);
1962
- BUG_ON(efx->state == STATE_DISABLED);
1963
-
1964
- /* Check that it is appropriate to restart the interface. All
1965
- * of these flags are safe to read under just the rtnl lock */
1966
- if (efx->port_enabled || !netif_running(efx->net_dev) ||
1967
- efx->reset_pending)
1968
- return;
1969
-
1970
- efx_start_port(efx);
1971
- efx_start_datapath(efx);
1972
-
1973
- /* Start the hardware monitor if there is one */
1974
- if (efx->type->monitor != NULL)
1975
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
1976
- efx_monitor_interval);
1977
-
1978
- /* Link state detection is normally event-driven; we have
1979
- * to poll now because we could have missed a change
1980
- */
1981
- mutex_lock(&efx->mac_lock);
1982
- if (efx->phy_op->poll(efx))
1983
- efx_link_status_changed(efx);
1984
- mutex_unlock(&efx->mac_lock);
1985
-
1986
- efx->type->start_stats(efx);
1987
- efx->type->pull_stats(efx);
1988
- spin_lock_bh(&efx->stats_lock);
1989
- efx->type->update_stats(efx, NULL, NULL);
1990
- spin_unlock_bh(&efx->stats_lock);
1991
-}
1992
-
1993
-/* Quiesce the hardware and software data path, and regular activity
1994
- * for the port without bringing the link down. Safe to call multiple
1995
- * times with the NIC in almost any state, but interrupts should be
1996
- * enabled. Requires the RTNL lock.
1997
- */
1998
-static void efx_stop_all(struct efx_nic *efx)
1999
-{
2000
- EFX_ASSERT_RESET_SERIALISED(efx);
2001
-
2002
- /* port_enabled can be read safely under the rtnl lock */
2003
- if (!efx->port_enabled)
2004
- return;
2005
-
2006
- /* update stats before we go down so we can accurately count
2007
- * rx_nodesc_drops
2008
- */
2009
- efx->type->pull_stats(efx);
2010
- spin_lock_bh(&efx->stats_lock);
2011
- efx->type->update_stats(efx, NULL, NULL);
2012
- spin_unlock_bh(&efx->stats_lock);
2013
- efx->type->stop_stats(efx);
2014
- efx_stop_port(efx);
2015
-
2016
- /* Stop the kernel transmit interface. This is only valid if
2017
- * the device is stopped or detached; otherwise the watchdog
2018
- * may fire immediately.
2019
- */
2020
- WARN_ON(netif_running(efx->net_dev) &&
2021
- netif_device_present(efx->net_dev));
2022
- netif_tx_disable(efx->net_dev);
2023
-
2024
- efx_stop_datapath(efx);
2025
-}
2026
-
2027390 static void efx_remove_all(struct efx_nic *efx)
2028391 {
392
+ rtnl_lock();
393
+ efx_xdp_setup_prog(efx, NULL);
394
+ rtnl_unlock();
395
+
2029396 efx_remove_channels(efx);
2030397 efx_remove_filters(efx);
2031398 #ifdef CONFIG_SFC_SRIOV
....@@ -2086,6 +453,8 @@
2086453 channel->irq_moderation_us = rx_usecs;
2087454 else if (efx_channel_has_tx_queues(channel))
2088455 channel->irq_moderation_us = tx_usecs;
456
+ else if (efx_channel_is_xdp_tx(channel))
457
+ channel->irq_moderation_us = tx_usecs;
2089458 }
2090459
2091460 return 0;
....@@ -2113,36 +482,6 @@
2113482
2114483 /**************************************************************************
2115484 *
2116
- * Hardware monitor
2117
- *
2118
- **************************************************************************/
2119
-
2120
-/* Run periodically off the general workqueue */
2121
-static void efx_monitor(struct work_struct *data)
2122
-{
2123
- struct efx_nic *efx = container_of(data, struct efx_nic,
2124
- monitor_work.work);
2125
-
2126
- netif_vdbg(efx, timer, efx->net_dev,
2127
- "hardware monitor executing on CPU %d\n",
2128
- raw_smp_processor_id());
2129
- BUG_ON(efx->type->monitor == NULL);
2130
-
2131
- /* If the mac_lock is already held then it is likely a port
2132
- * reconfiguration is already in place, which will likely do
2133
- * most of the work of monitor() anyway. */
2134
- if (mutex_trylock(&efx->mac_lock)) {
2135
- if (efx->port_enabled)
2136
- efx->type->monitor(efx);
2137
- mutex_unlock(&efx->mac_lock);
2138
- }
2139
-
2140
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
2141
- efx_monitor_interval);
2142
-}
2143
-
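Two idioms in the removed efx_monitor() above are easy to miss: the delayed work re-queues itself at the end of every run, and it takes mac_lock with mutex_trylock() so that a tick racing with a port reconfiguration (which already holds mac_lock and does most of the same work) is simply skipped rather than waited for. A minimal user-space sketch of the trylock-or-skip idiom, with a pthread mutex standing in for mac_lock (not driver code; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mac_lock = PTHREAD_MUTEX_INITIALIZER;

static void monitor_tick(void)
{
	if (pthread_mutex_trylock(&mac_lock) == 0) {
		printf("monitor: polling hardware\n");
		pthread_mutex_unlock(&mac_lock);
	} else {
		printf("monitor: mac_lock busy, skipping this tick\n");
	}
	/* the driver would requeue itself here:
	 * queue_delayed_work(efx->workqueue, &efx->monitor_work,
	 *                    efx_monitor_interval);
	 */
}

int main(void)
{
	monitor_tick();			/* lock is free: polls */

	pthread_mutex_lock(&mac_lock);	/* pretend a reconfigure holds mac_lock */
	monitor_tick();			/* lock is busy: skips */
	pthread_mutex_unlock(&mac_lock);
	return 0;
}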
2144
-/**************************************************************************
2145
- *
2146485 * ioctls
2147486 *
2148487 *************************************************************************/
....@@ -2166,45 +505,6 @@
2166505 data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
2167506
2168507 return mdio_mii_ioctl(&efx->mdio, data, cmd);
2169
-}
2170
-
2171
-/**************************************************************************
2172
- *
2173
- * NAPI interface
2174
- *
2175
- **************************************************************************/
2176
-
2177
-static void efx_init_napi_channel(struct efx_channel *channel)
2178
-{
2179
- struct efx_nic *efx = channel->efx;
2180
-
2181
- channel->napi_dev = efx->net_dev;
2182
- netif_napi_add(channel->napi_dev, &channel->napi_str,
2183
- efx_poll, napi_weight);
2184
-}
2185
-
2186
-static void efx_init_napi(struct efx_nic *efx)
2187
-{
2188
- struct efx_channel *channel;
2189
-
2190
- efx_for_each_channel(channel, efx)
2191
- efx_init_napi_channel(channel);
2192
-}
2193
-
2194
-static void efx_fini_napi_channel(struct efx_channel *channel)
2195
-{
2196
- if (channel->napi_dev)
2197
- netif_napi_del(&channel->napi_str);
2198
-
2199
- channel->napi_dev = NULL;
2200
-}
2201
-
2202
-static void efx_fini_napi(struct efx_nic *efx)
2203
-{
2204
- struct efx_channel *channel;
2205
-
2206
- efx_for_each_channel(channel, efx)
2207
- efx_fini_napi_channel(channel);
2208508 }
2209509
2210510 /**************************************************************************
....@@ -2237,7 +537,9 @@
2237537 efx_start_all(efx);
2238538 if (efx->state == STATE_DISABLED || efx->reset_pending)
2239539 netif_device_detach(efx->net_dev);
2240
- efx_selftest_async_start(efx);
540
+ else
541
+ efx->state = STATE_NET_UP;
542
+
2241543 return 0;
2242544 }
2243545
....@@ -2255,146 +557,6 @@
2255557 /* Stop the device and flush all the channels */
2256558 efx_stop_all(efx);
2257559
2258
- return 0;
2259
-}
2260
-
2261
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
2262
-static void efx_net_stats(struct net_device *net_dev,
2263
- struct rtnl_link_stats64 *stats)
2264
-{
2265
- struct efx_nic *efx = netdev_priv(net_dev);
2266
-
2267
- spin_lock_bh(&efx->stats_lock);
2268
- efx->type->update_stats(efx, NULL, stats);
2269
- spin_unlock_bh(&efx->stats_lock);
2270
-}
2271
-
2272
-/* Context: netif_tx_lock held, BHs disabled. */
2273
-static void efx_watchdog(struct net_device *net_dev)
2274
-{
2275
- struct efx_nic *efx = netdev_priv(net_dev);
2276
-
2277
- netif_err(efx, tx_err, efx->net_dev,
2278
- "TX stuck with port_enabled=%d: resetting channels\n",
2279
- efx->port_enabled);
2280
-
2281
- efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
2282
-}
2283
-
2284
-
2285
-/* Context: process, rtnl_lock() held. */
2286
-static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
2287
-{
2288
- struct efx_nic *efx = netdev_priv(net_dev);
2289
- int rc;
2290
-
2291
- rc = efx_check_disabled(efx);
2292
- if (rc)
2293
- return rc;
2294
-
2295
- netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
2296
-
2297
- efx_device_detach_sync(efx);
2298
- efx_stop_all(efx);
2299
-
2300
- mutex_lock(&efx->mac_lock);
2301
- net_dev->mtu = new_mtu;
2302
- efx_mac_reconfigure(efx);
2303
- mutex_unlock(&efx->mac_lock);
2304
-
2305
- efx_start_all(efx);
2306
- efx_device_attach_if_not_resetting(efx);
2307
- return 0;
2308
-}
2309
-
2310
-static int efx_set_mac_address(struct net_device *net_dev, void *data)
2311
-{
2312
- struct efx_nic *efx = netdev_priv(net_dev);
2313
- struct sockaddr *addr = data;
2314
- u8 *new_addr = addr->sa_data;
2315
- u8 old_addr[6];
2316
- int rc;
2317
-
2318
- if (!is_valid_ether_addr(new_addr)) {
2319
- netif_err(efx, drv, efx->net_dev,
2320
- "invalid ethernet MAC address requested: %pM\n",
2321
- new_addr);
2322
- return -EADDRNOTAVAIL;
2323
- }
2324
-
2325
- /* save old address */
2326
- ether_addr_copy(old_addr, net_dev->dev_addr);
2327
- ether_addr_copy(net_dev->dev_addr, new_addr);
2328
- if (efx->type->set_mac_address) {
2329
- rc = efx->type->set_mac_address(efx);
2330
- if (rc) {
2331
- ether_addr_copy(net_dev->dev_addr, old_addr);
2332
- return rc;
2333
- }
2334
- }
2335
-
2336
- /* Reconfigure the MAC */
2337
- mutex_lock(&efx->mac_lock);
2338
- efx_mac_reconfigure(efx);
2339
- mutex_unlock(&efx->mac_lock);
2340
-
2341
- return 0;
2342
-}
2343
-
2344
-/* Context: netif_addr_lock held, BHs disabled. */
2345
-static void efx_set_rx_mode(struct net_device *net_dev)
2346
-{
2347
- struct efx_nic *efx = netdev_priv(net_dev);
2348
-
2349
- if (efx->port_enabled)
2350
- queue_work(efx->workqueue, &efx->mac_work);
2351
- /* Otherwise efx_start_port() will do this */
2352
-}
2353
-
2354
-static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
2355
-{
2356
- struct efx_nic *efx = netdev_priv(net_dev);
2357
- int rc;
2358
-
2359
- /* If disabling RX n-tuple filtering, clear existing filters */
2360
- if (net_dev->features & ~data & NETIF_F_NTUPLE) {
2361
- rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
2362
- if (rc)
2363
- return rc;
2364
- }
2365
-
2366
- /* If Rx VLAN filter is changed, update filters via mac_reconfigure.
2367
- * If rx-fcs is changed, mac_reconfigure updates that too.
2368
- */
2369
- if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
2370
- NETIF_F_RXFCS)) {
2371
- /* efx_set_rx_mode() will schedule MAC work to update filters
2372
- * when a new features are finally set in net_dev.
2373
- */
2374
- efx_set_rx_mode(net_dev);
2375
- }
2376
-
2377
- return 0;
2378
-}
2379
-
2380
-static int efx_get_phys_port_id(struct net_device *net_dev,
2381
- struct netdev_phys_item_id *ppid)
2382
-{
2383
- struct efx_nic *efx = netdev_priv(net_dev);
2384
-
2385
- if (efx->type->get_phys_port_id)
2386
- return efx->type->get_phys_port_id(efx, ppid);
2387
- else
2388
- return -EOPNOTSUPP;
2389
-}
2390
-
2391
-static int efx_get_phys_port_name(struct net_device *net_dev,
2392
- char *name, size_t len)
2393
-{
2394
- struct efx_nic *efx = netdev_priv(net_dev);
2395
-
2396
- if (snprintf(name, len, "p%u", efx->port_num) >= len)
2397
- return -EINVAL;
2398560 return 0;
2399561 }
2400562
....@@ -2418,52 +580,6 @@
2418580 return -EOPNOTSUPP;
2419581 }
2420582
2421
-static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in)
2422
-{
2423
- switch (in) {
2424
- case UDP_TUNNEL_TYPE_VXLAN:
2425
- return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
2426
- case UDP_TUNNEL_TYPE_GENEVE:
2427
- return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
2428
- default:
2429
- return -1;
2430
- }
2431
-}
2432
-
2433
-static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
2434
-{
2435
- struct efx_nic *efx = netdev_priv(dev);
2436
- struct efx_udp_tunnel tnl;
2437
- int efx_tunnel_type;
2438
-
2439
- efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
2440
- if (efx_tunnel_type < 0)
2441
- return;
2442
-
2443
- tnl.type = (u16)efx_tunnel_type;
2444
- tnl.port = ti->port;
2445
-
2446
- if (efx->type->udp_tnl_add_port)
2447
- (void)efx->type->udp_tnl_add_port(efx, tnl);
2448
-}
2449
-
2450
-static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
2451
-{
2452
- struct efx_nic *efx = netdev_priv(dev);
2453
- struct efx_udp_tunnel tnl;
2454
- int efx_tunnel_type;
2455
-
2456
- efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
2457
- if (efx_tunnel_type < 0)
2458
- return;
2459
-
2460
- tnl.type = (u16)efx_tunnel_type;
2461
- tnl.port = ti->port;
2462
-
2463
- if (efx->type->udp_tnl_del_port)
2464
- (void)efx->type->udp_tnl_del_port(efx, tnl);
2465
-}
2466
-
2467583 static const struct net_device_ops efx_netdev_ops = {
2468584 .ndo_open = efx_net_open,
2469585 .ndo_stop = efx_net_stop,
....@@ -2476,6 +592,7 @@
2476592 .ndo_set_mac_address = efx_set_mac_address,
2477593 .ndo_set_rx_mode = efx_set_rx_mode,
2478594 .ndo_set_features = efx_set_features,
595
+ .ndo_features_check = efx_features_check,
2479596 .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
2480597 .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
2481598 #ifdef CONFIG_SFC_SRIOV
....@@ -2491,9 +608,61 @@
2491608 #ifdef CONFIG_RFS_ACCEL
2492609 .ndo_rx_flow_steer = efx_filter_rfs,
2493610 #endif
2494
- .ndo_udp_tunnel_add = efx_udp_tunnel_add,
2495
- .ndo_udp_tunnel_del = efx_udp_tunnel_del,
611
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
612
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
613
+ .ndo_xdp_xmit = efx_xdp_xmit,
614
+ .ndo_bpf = efx_xdp
2496615 };
616
+
617
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
618
+{
619
+ struct bpf_prog *old_prog;
620
+
621
+ if (efx->xdp_rxq_info_failed) {
622
+ netif_err(efx, drv, efx->net_dev,
623
+ "Unable to bind XDP program due to previous failure of rxq_info\n");
624
+ return -EINVAL;
625
+ }
626
+
627
+ if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) {
628
+ netif_err(efx, drv, efx->net_dev,
629
+ "Unable to configure XDP with MTU of %d (max: %d)\n",
630
+ efx->net_dev->mtu, efx_xdp_max_mtu(efx));
631
+ return -EINVAL;
632
+ }
633
+
634
+ old_prog = rtnl_dereference(efx->xdp_prog);
635
+ rcu_assign_pointer(efx->xdp_prog, prog);
636
+ /* Release the reference that was originally passed by the caller. */
637
+ if (old_prog)
638
+ bpf_prog_put(old_prog);
639
+
640
+ return 0;
641
+}
642
+
643
+/* Context: process, rtnl_lock() held. */
644
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
645
+{
646
+ struct efx_nic *efx = netdev_priv(dev);
647
+
648
+ switch (xdp->command) {
649
+ case XDP_SETUP_PROG:
650
+ return efx_xdp_setup_prog(efx, xdp->prog);
651
+ default:
652
+ return -EINVAL;
653
+ }
654
+}
655
+
656
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
657
+ u32 flags)
658
+{
659
+ struct efx_nic *efx = netdev_priv(dev);
660
+
661
+ if (!netif_running(dev))
662
+ return -EINVAL;
663
+
664
+ return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
665
+}
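The new efx_xdp_setup_prog() above follows a publish-then-release order: it reads the old program under the RTNL lock (rtnl_dereference), publishes the new one (rcu_assign_pointer), and only then drops the reference it held on the old program (bpf_prog_put). A user-space analogue of that ordering, with a plain refcount standing in for the BPF program's reference count and no real RCU involved (not driver or kernel code):

#include <stdio.h>
#include <stdlib.h>

struct prog {
	int refcnt;
	const char *name;
};

static struct prog *current_prog;	/* analogue of efx->xdp_prog */

static void prog_put(struct prog *p)
{
	if (p && --p->refcnt == 0) {
		printf("freeing %s\n", p->name);
		free(p);
	}
}

/* Caller hands over its reference to new_prog (NULL detaches). */
static void setup_prog(struct prog *new_prog)
{
	struct prog *old = current_prog;

	current_prog = new_prog;	/* publish the new program first ... */
	prog_put(old);			/* ... then release the old one */
}

int main(void)
{
	struct prog *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	p->refcnt = 1;
	p->name = "xdp_pass";
	setup_prog(p);		/* attach */
	setup_prog(NULL);	/* detach: prints "freeing xdp_pass" */
	return 0;
}

In the driver, the reference the caller passed in with the new program stays with the device until the next swap, which is when it is finally put; that is what the "Release the reference that was originally passed by the caller" comment refers to.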
2497666
2498667 static void efx_update_name(struct efx_nic *efx)
2499668 {
....@@ -2521,32 +690,10 @@
2521690 static ssize_t
2522691 show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
2523692 {
2524
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
693
+ struct efx_nic *efx = dev_get_drvdata(dev);
2525694 return sprintf(buf, "%d\n", efx->phy_type);
2526695 }
2527696 static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
2528
-
2529
-#ifdef CONFIG_SFC_MCDI_LOGGING
2530
-static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
2531
- char *buf)
2532
-{
2533
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2534
- struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
2535
-
2536
- return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
2537
-}
2538
-static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
2539
- const char *buf, size_t count)
2540
-{
2541
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2542
- struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
2543
- bool enable = count > 0 && *buf != '0';
2544
-
2545
- mcdi->logging_enabled = enable;
2546
- return count;
2547
-}
2548
-static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
2549
-#endif
2550697
2551698 static int efx_register_netdev(struct efx_nic *efx)
2552699 {
....@@ -2570,8 +717,6 @@
2570717 * already requested. If so, the NIC is probably hosed so we
2571718 * abort.
2572719 */
2573
- efx->state = STATE_READY;
2574
- smp_mb(); /* ensure we change state before checking reset_pending */
2575720 if (efx->reset_pending) {
2576721 netif_err(efx, probe, efx->net_dev,
2577722 "aborting probe due to scheduled reset\n");
....@@ -2599,6 +744,8 @@
2599744
2600745 efx_associate(efx);
2601746
747
+ efx->state = STATE_NET_DOWN;
748
+
2602749 rtnl_unlock();
2603750
2604751 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
....@@ -2607,21 +754,11 @@
2607754 "failed to init net dev attributes\n");
2608755 goto fail_registered;
2609756 }
2610
-#ifdef CONFIG_SFC_MCDI_LOGGING
2611
- rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
2612
- if (rc) {
2613
- netif_err(efx, drv, efx->net_dev,
2614
- "failed to init net dev attributes\n");
2615
- goto fail_attr_mcdi_logging;
2616
- }
2617
-#endif
757
+
758
+ efx_init_mcdi_logging(efx);
2618759
2619760 return 0;
2620761
2621
-#ifdef CONFIG_SFC_MCDI_LOGGING
2622
-fail_attr_mcdi_logging:
2623
- device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2624
-#endif
2625762 fail_registered:
2626763 rtnl_lock();
2627764 efx_dissociate(efx);
....@@ -2642,298 +779,10 @@
2642779
2643780 if (efx_dev_registered(efx)) {
2644781 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2645
-#ifdef CONFIG_SFC_MCDI_LOGGING
2646
- device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
2647
-#endif
782
+ efx_fini_mcdi_logging(efx);
2648783 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2649784 unregister_netdev(efx->net_dev);
2650785 }
2651
-}
2652
-
2653
-/**************************************************************************
2654
- *
2655
- * Device reset and suspend
2656
- *
2657
- **************************************************************************/
2658
-
2659
-/* Tears down the entire software state and most of the hardware state
2660
- * before reset. */
2661
-void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2662
-{
2663
- EFX_ASSERT_RESET_SERIALISED(efx);
2664
-
2665
- if (method == RESET_TYPE_MCDI_TIMEOUT)
2666
- efx->type->prepare_flr(efx);
2667
-
2668
- efx_stop_all(efx);
2669
- efx_disable_interrupts(efx);
2670
-
2671
- mutex_lock(&efx->mac_lock);
2672
- down_write(&efx->filter_sem);
2673
- mutex_lock(&efx->rss_lock);
2674
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2675
- method != RESET_TYPE_DATAPATH)
2676
- efx->phy_op->fini(efx);
2677
- efx->type->fini(efx);
2678
-}
2679
-
2680
-/* This function will always ensure that the locks acquired in
2681
- * efx_reset_down() are released. A failure return code indicates
2682
- * that we were unable to reinitialise the hardware, and the
2683
- * driver should be disabled. If ok is false, then the rx and tx
2684
- * engines are not restarted, pending a RESET_DISABLE. */
2685
-int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2686
-{
2687
- int rc;
2688
-
2689
- EFX_ASSERT_RESET_SERIALISED(efx);
2690
-
2691
- if (method == RESET_TYPE_MCDI_TIMEOUT)
2692
- efx->type->finish_flr(efx);
2693
-
2694
- /* Ensure that SRAM is initialised even if we're disabling the device */
2695
- rc = efx->type->init(efx);
2696
- if (rc) {
2697
- netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
2698
- goto fail;
2699
- }
2700
-
2701
- if (!ok)
2702
- goto fail;
2703
-
2704
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2705
- method != RESET_TYPE_DATAPATH) {
2706
- rc = efx->phy_op->init(efx);
2707
- if (rc)
2708
- goto fail;
2709
- rc = efx->phy_op->reconfigure(efx);
2710
- if (rc && rc != -EPERM)
2711
- netif_err(efx, drv, efx->net_dev,
2712
- "could not restore PHY settings\n");
2713
- }
2714
-
2715
- rc = efx_enable_interrupts(efx);
2716
- if (rc)
2717
- goto fail;
2718
-
2719
-#ifdef CONFIG_SFC_SRIOV
2720
- rc = efx->type->vswitching_restore(efx);
2721
- if (rc) /* not fatal; the PF will still work fine */
2722
- netif_warn(efx, probe, efx->net_dev,
2723
- "failed to restore vswitching rc=%d;"
2724
- " VFs may not function\n", rc);
2725
-#endif
2726
-
2727
- if (efx->type->rx_restore_rss_contexts)
2728
- efx->type->rx_restore_rss_contexts(efx);
2729
- mutex_unlock(&efx->rss_lock);
2730
- efx->type->filter_table_restore(efx);
2731
- up_write(&efx->filter_sem);
2732
- if (efx->type->sriov_reset)
2733
- efx->type->sriov_reset(efx);
2734
-
2735
- mutex_unlock(&efx->mac_lock);
2736
-
2737
- efx_start_all(efx);
2738
-
2739
- if (efx->type->udp_tnl_push_ports)
2740
- efx->type->udp_tnl_push_ports(efx);
2741
-
2742
- return 0;
2743
-
2744
-fail:
2745
- efx->port_initialized = false;
2746
-
2747
- mutex_unlock(&efx->rss_lock);
2748
- up_write(&efx->filter_sem);
2749
- mutex_unlock(&efx->mac_lock);
2750
-
2751
- return rc;
2752
-}
2753
-
2754
-/* Reset the NIC using the specified method. Note that the reset may
2755
- * fail, in which case the card will be left in an unusable state.
2756
- *
2757
- * Caller must hold the rtnl_lock.
2758
- */
2759
-int efx_reset(struct efx_nic *efx, enum reset_type method)
2760
-{
2761
- int rc, rc2;
2762
- bool disabled;
2763
-
2764
- netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2765
- RESET_TYPE(method));
2766
-
2767
- efx_device_detach_sync(efx);
2768
- efx_reset_down(efx, method);
2769
-
2770
- rc = efx->type->reset(efx, method);
2771
- if (rc) {
2772
- netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
2773
- goto out;
2774
- }
2775
-
2776
- /* Clear flags for the scopes we covered. We assume the NIC and
2777
- * driver are now quiescent so that there is no race here.
2778
- */
2779
- if (method < RESET_TYPE_MAX_METHOD)
2780
- efx->reset_pending &= -(1 << (method + 1));
2781
- else /* it doesn't fit into the well-ordered scope hierarchy */
2782
- __clear_bit(method, &efx->reset_pending);
2783
-
2784
- /* Reinitialise bus-mastering, which may have been turned off before
2785
- * the reset was scheduled. This is still appropriate, even in the
2786
- * RESET_TYPE_DISABLE since this driver generally assumes the hardware
2787
- * can respond to requests. */
2788
- pci_set_master(efx->pci_dev);
2789
-
2790
-out:
2791
- /* Leave device stopped if necessary */
2792
- disabled = rc ||
2793
- method == RESET_TYPE_DISABLE ||
2794
- method == RESET_TYPE_RECOVER_OR_DISABLE;
2795
- rc2 = efx_reset_up(efx, method, !disabled);
2796
- if (rc2) {
2797
- disabled = true;
2798
- if (!rc)
2799
- rc = rc2;
2800
- }
2801
-
2802
- if (disabled) {
2803
- dev_close(efx->net_dev);
2804
- netif_err(efx, drv, efx->net_dev, "has been disabled\n");
2805
- efx->state = STATE_DISABLED;
2806
- } else {
2807
- netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2808
- efx_device_attach_if_not_resetting(efx);
2809
- }
2810
- return rc;
2811
-}
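The scope-clearing step in the removed efx_reset() above, efx->reset_pending &= -(1 << (method + 1)), is terse: reset types are ordered so that a higher-numbered method subsumes all lower-numbered ones, and -(1 << (method + 1)), which equals ~((1 << (method + 1)) - 1), clears every pending bit at or below the method just performed. Masking instead of simply zeroing preserves any higher-scope reset that was scheduled after the pending snapshot was taken. A standalone sketch of the mask arithmetic (not driver code; the bit values are made up):

#include <stdio.h>

int main(void)
{
	unsigned long pending = 0x2d;	/* bits 0, 2, 3 and 5 pending */
	int method = 3;			/* scope of the reset just performed */

	/* -(1 << (m + 1)) has all bits above m set and bits 0..m clear */
	pending &= -(1UL << (method + 1));

	/* bits 0..3 are cleared, bit 5 survives: prints 0x20 */
	printf("remaining pending = %#lx\n", pending);
	return 0;
}

In the driver, method is fls(pending) - 1, the highest scope in the snapshot, so only bits set after the snapshot (or the out-of-hierarchy types handled by __clear_bit()) can remain.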
2812
-
2813
-/* Try recovery mechanisms.
2814
- * For now only EEH is supported.
2815
- * Returns 0 if the recovery mechanisms are unsuccessful.
2816
- * Returns a non-zero value otherwise.
2817
- */
2818
-int efx_try_recovery(struct efx_nic *efx)
2819
-{
2820
-#ifdef CONFIG_EEH
2821
- /* A PCI error can occur and not be seen by EEH because nothing
2822
- * happens on the PCI bus. In this case the driver may fail and
2823
- * schedule a 'recover or reset', leading to this recovery handler.
2824
- * Manually call the eeh failure check function.
2825
- */
2826
- struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
2827
- if (eeh_dev_check_failure(eehdev)) {
2828
- /* The EEH mechanisms will handle the error and reset the
2829
- * device if necessary.
2830
- */
2831
- return 1;
2832
- }
2833
-#endif
2834
- return 0;
2835
-}
2836
-
2837
-static void efx_wait_for_bist_end(struct efx_nic *efx)
2838
-{
2839
- int i;
2840
-
2841
- for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
2842
- if (efx_mcdi_poll_reboot(efx))
2843
- goto out;
2844
- msleep(BIST_WAIT_DELAY_MS);
2845
- }
2846
-
2847
- netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
2848
-out:
2849
- /* Either way unset the BIST flag. If we found no reboot we probably
2850
- * won't recover, but we should try.
2851
- */
2852
- efx->mc_bist_for_other_fn = false;
2853
-}
2854
-
2855
-/* The worker thread exists so that code that cannot sleep can
2856
- * schedule a reset for later.
2857
- */
2858
-static void efx_reset_work(struct work_struct *data)
2859
-{
2860
- struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
2861
- unsigned long pending;
2862
- enum reset_type method;
2863
-
2864
- pending = READ_ONCE(efx->reset_pending);
2865
- method = fls(pending) - 1;
2866
-
2867
- if (method == RESET_TYPE_MC_BIST)
2868
- efx_wait_for_bist_end(efx);
2869
-
2870
- if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2871
- method == RESET_TYPE_RECOVER_OR_ALL) &&
2872
- efx_try_recovery(efx))
2873
- return;
2874
-
2875
- if (!pending)
2876
- return;
2877
-
2878
- rtnl_lock();
2879
-
2880
- /* We checked the state in efx_schedule_reset() but it may
2881
- * have changed by now. Now that we have the RTNL lock,
2882
- * it cannot change again.
2883
- */
2884
- if (efx->state == STATE_READY)
2885
- (void)efx_reset(efx, method);
2886
-
2887
- rtnl_unlock();
2888
-}
2889
-
2890
-void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2891
-{
2892
- enum reset_type method;
2893
-
2894
- if (efx->state == STATE_RECOVERY) {
2895
- netif_dbg(efx, drv, efx->net_dev,
2896
- "recovering: skip scheduling %s reset\n",
2897
- RESET_TYPE(type));
2898
- return;
2899
- }
2900
-
2901
- switch (type) {
2902
- case RESET_TYPE_INVISIBLE:
2903
- case RESET_TYPE_ALL:
2904
- case RESET_TYPE_RECOVER_OR_ALL:
2905
- case RESET_TYPE_WORLD:
2906
- case RESET_TYPE_DISABLE:
2907
- case RESET_TYPE_RECOVER_OR_DISABLE:
2908
- case RESET_TYPE_DATAPATH:
2909
- case RESET_TYPE_MC_BIST:
2910
- case RESET_TYPE_MCDI_TIMEOUT:
2911
- method = type;
2912
- netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2913
- RESET_TYPE(method));
2914
- break;
2915
- default:
2916
- method = efx->type->map_reset_reason(type);
2917
- netif_dbg(efx, drv, efx->net_dev,
2918
- "scheduling %s reset for %s\n",
2919
- RESET_TYPE(method), RESET_TYPE(type));
2920
- break;
2921
- }
2922
-
2923
- set_bit(method, &efx->reset_pending);
2924
- smp_mb(); /* ensure we change reset_pending before checking state */
2925
-
2926
- /* If we're not READY then just leave the flags set as the cue
2927
- * to abort probing or reschedule the reset later.
2928
- */
2929
- if (READ_ONCE(efx->state) != STATE_READY)
2930
- return;
2931
-
2932
- /* efx_process_channel() will no longer read events once a
2933
- * reset is scheduled. So switch back to poll'd MCDI completions. */
2934
- efx_mcdi_mode_poll(efx);
2935
-
2936
- queue_work(reset_workqueue, &efx->reset_work);
2937786 }
2938787
2939788 /**************************************************************************
....@@ -2969,138 +818,9 @@
2969818
2970819 /**************************************************************************
2971820 *
2972
- * Dummy PHY/MAC operations
2973
- *
2974
- * Can be used for some unimplemented operations
2975
- * Needed so all function pointers are valid and do not have to be tested
2976
- * before use
2977
- *
2978
- **************************************************************************/
2979
-int efx_port_dummy_op_int(struct efx_nic *efx)
2980
-{
2981
- return 0;
2982
-}
2983
-void efx_port_dummy_op_void(struct efx_nic *efx) {}
2984
-
2985
-static bool efx_port_dummy_op_poll(struct efx_nic *efx)
2986
-{
2987
- return false;
2988
-}
2989
-
2990
-static const struct efx_phy_operations efx_dummy_phy_operations = {
2991
- .init = efx_port_dummy_op_int,
2992
- .reconfigure = efx_port_dummy_op_int,
2993
- .poll = efx_port_dummy_op_poll,
2994
- .fini = efx_port_dummy_op_void,
2995
-};
2996
-
2997
-/**************************************************************************
2998
- *
2999821 * Data housekeeping
3000822 *
3001823 **************************************************************************/
3002
-
3003
-/* This zeroes out and then fills in the invariants in a struct
3004
- * efx_nic (including all sub-structures).
3005
- */
3006
-static int efx_init_struct(struct efx_nic *efx,
3007
- struct pci_dev *pci_dev, struct net_device *net_dev)
3008
-{
3009
- int rc = -ENOMEM, i;
3010
-
3011
- /* Initialise common structures */
3012
- INIT_LIST_HEAD(&efx->node);
3013
- INIT_LIST_HEAD(&efx->secondary_list);
3014
- spin_lock_init(&efx->biu_lock);
3015
-#ifdef CONFIG_SFC_MTD
3016
- INIT_LIST_HEAD(&efx->mtd_list);
3017
-#endif
3018
- INIT_WORK(&efx->reset_work, efx_reset_work);
3019
- INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
3020
- INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
3021
- efx->pci_dev = pci_dev;
3022
- efx->msg_enable = debug;
3023
- efx->state = STATE_UNINIT;
3024
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
3025
-
3026
- efx->net_dev = net_dev;
3027
- efx->rx_prefix_size = efx->type->rx_prefix_size;
3028
- efx->rx_ip_align =
3029
- NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
3030
- efx->rx_packet_hash_offset =
3031
- efx->type->rx_hash_offset - efx->type->rx_prefix_size;
3032
- efx->rx_packet_ts_offset =
3033
- efx->type->rx_ts_offset - efx->type->rx_prefix_size;
3034
- INIT_LIST_HEAD(&efx->rss_context.list);
3035
- mutex_init(&efx->rss_lock);
3036
- spin_lock_init(&efx->stats_lock);
3037
- efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
3038
- efx->num_mac_stats = MC_CMD_MAC_NSTATS;
3039
- BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
3040
- mutex_init(&efx->mac_lock);
3041
-#ifdef CONFIG_RFS_ACCEL
3042
- mutex_init(&efx->rps_mutex);
3043
- spin_lock_init(&efx->rps_hash_lock);
3044
- /* Failure to allocate is not fatal, but may degrade ARFS performance */
3045
- efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
3046
- sizeof(*efx->rps_hash_table), GFP_KERNEL);
3047
-#endif
3048
- efx->phy_op = &efx_dummy_phy_operations;
3049
- efx->mdio.dev = net_dev;
3050
- INIT_WORK(&efx->mac_work, efx_mac_work);
3051
- init_waitqueue_head(&efx->flush_wq);
3052
-
3053
- for (i = 0; i < EFX_MAX_CHANNELS; i++) {
3054
- efx->channel[i] = efx_alloc_channel(efx, i, NULL);
3055
- if (!efx->channel[i])
3056
- goto fail;
3057
- efx->msi_context[i].efx = efx;
3058
- efx->msi_context[i].index = i;
3059
- }
3060
-
3061
- /* Higher numbered interrupt modes are less capable! */
3062
- if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
3063
- efx->type->min_interrupt_mode)) {
3064
- rc = -EIO;
3065
- goto fail;
3066
- }
3067
- efx->interrupt_mode = max(efx->type->max_interrupt_mode,
3068
- interrupt_mode);
3069
- efx->interrupt_mode = min(efx->type->min_interrupt_mode,
3070
- interrupt_mode);
3071
-
3072
- /* Would be good to use the net_dev name, but we're too early */
3073
- snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
3074
- pci_name(pci_dev));
3075
- efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
3076
- if (!efx->workqueue)
3077
- goto fail;
3078
-
3079
- return 0;
3080
-
3081
-fail:
3082
- efx_fini_struct(efx);
3083
- return rc;
3084
-}
3085
-
3086
-static void efx_fini_struct(struct efx_nic *efx)
3087
-{
3088
- int i;
3089
-
3090
-#ifdef CONFIG_RFS_ACCEL
3091
- kfree(efx->rps_hash_table);
3092
-#endif
3093
-
3094
- for (i = 0; i < EFX_MAX_CHANNELS; i++)
3095
- kfree(efx->channel[i]);
3096
-
3097
- kfree(efx->vpd_sn);
3098
-
3099
- if (efx->workqueue) {
3100
- destroy_workqueue(efx->workqueue);
3101
- efx->workqueue = NULL;
3102
- }
3103
-}
3104824
3105825 void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
3106826 {
....@@ -3111,197 +831,6 @@
3111831 n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
3112832 stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
3113833 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
3114
-}
3115
-
3116
-bool efx_filter_spec_equal(const struct efx_filter_spec *left,
3117
- const struct efx_filter_spec *right)
3118
-{
3119
- if ((left->match_flags ^ right->match_flags) |
3120
- ((left->flags ^ right->flags) &
3121
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
3122
- return false;
3123
-
3124
- return memcmp(&left->outer_vid, &right->outer_vid,
3125
- sizeof(struct efx_filter_spec) -
3126
- offsetof(struct efx_filter_spec, outer_vid)) == 0;
3127
-}
3128
-
3129
-u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
3130
-{
3131
- BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
3132
- return jhash2((const u32 *)&spec->outer_vid,
3133
- (sizeof(struct efx_filter_spec) -
3134
- offsetof(struct efx_filter_spec, outer_vid)) / 4,
3135
- 0);
3136
-}
3137
-
3138
-#ifdef CONFIG_RFS_ACCEL
3139
-bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
3140
- bool *force)
3141
-{
3142
- if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
3143
- /* ARFS is currently updating this entry, leave it */
3144
- return false;
3145
- }
3146
- if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
3147
- /* ARFS tried and failed to update this, so it's probably out
3148
- * of date. Remove the filter and the ARFS rule entry.
3149
- */
3150
- rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
3151
- *force = true;
3152
- return true;
3153
- } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
3154
- /* ARFS has moved on, so old filter is not needed. Since we did
3155
- * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
3156
- * not be removed by efx_rps_hash_del() subsequently.
3157
- */
3158
- *force = true;
3159
- return true;
3160
- }
3161
- /* Remove it iff ARFS wants to. */
3162
- return true;
3163
-}
3164
-
3165
-static
3166
-struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
3167
- const struct efx_filter_spec *spec)
3168
-{
3169
- u32 hash = efx_filter_spec_hash(spec);
3170
-
3171
- WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
3172
- if (!efx->rps_hash_table)
3173
- return NULL;
3174
- return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
3175
-}
3176
-
3177
-struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
3178
- const struct efx_filter_spec *spec)
3179
-{
3180
- struct efx_arfs_rule *rule;
3181
- struct hlist_head *head;
3182
- struct hlist_node *node;
3183
-
3184
- head = efx_rps_hash_bucket(efx, spec);
3185
- if (!head)
3186
- return NULL;
3187
- hlist_for_each(node, head) {
3188
- rule = container_of(node, struct efx_arfs_rule, node);
3189
- if (efx_filter_spec_equal(spec, &rule->spec))
3190
- return rule;
3191
- }
3192
- return NULL;
3193
-}
3194
-
3195
-struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
3196
- const struct efx_filter_spec *spec,
3197
- bool *new)
3198
-{
3199
- struct efx_arfs_rule *rule;
3200
- struct hlist_head *head;
3201
- struct hlist_node *node;
3202
-
3203
- head = efx_rps_hash_bucket(efx, spec);
3204
- if (!head)
3205
- return NULL;
3206
- hlist_for_each(node, head) {
3207
- rule = container_of(node, struct efx_arfs_rule, node);
3208
- if (efx_filter_spec_equal(spec, &rule->spec)) {
3209
- *new = false;
3210
- return rule;
3211
- }
3212
- }
3213
- rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
3214
- *new = true;
3215
- if (rule) {
3216
- memcpy(&rule->spec, spec, sizeof(rule->spec));
3217
- hlist_add_head(&rule->node, head);
3218
- }
3219
- return rule;
3220
-}
3221
-
3222
-void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
3223
-{
3224
- struct efx_arfs_rule *rule;
3225
- struct hlist_head *head;
3226
- struct hlist_node *node;
3227
-
3228
- head = efx_rps_hash_bucket(efx, spec);
3229
- if (WARN_ON(!head))
3230
- return;
3231
- hlist_for_each(node, head) {
3232
- rule = container_of(node, struct efx_arfs_rule, node);
3233
- if (efx_filter_spec_equal(spec, &rule->spec)) {
3234
- /* Someone already reused the entry. We know that if
3235
- * this check doesn't fire (i.e. filter_id == REMOVING)
3236
- * then the REMOVING mark was put there by our caller,
3237
- * because caller is holding a lock on filter table and
3238
- * only holders of that lock set REMOVING.
3239
- */
3240
- if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
3241
- return;
3242
- hlist_del(node);
3243
- kfree(rule);
3244
- return;
3245
- }
3246
- }
3247
- /* We didn't find it. */
3248
- WARN_ON(1);
3249
-}
3250
-#endif
3251
-
3252
-/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
3253
- * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
3254
- */
3255
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
3256
-{
3257
- struct list_head *head = &efx->rss_context.list;
3258
- struct efx_rss_context *ctx, *new;
3259
- u32 id = 1; /* Don't use zero, that refers to the master RSS context */
3260
-
3261
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
3262
-
3263
- /* Search for first gap in the numbering */
3264
- list_for_each_entry(ctx, head, list) {
3265
- if (ctx->user_id != id)
3266
- break;
3267
- id++;
3268
- /* Check for wrap. If this happens, we have nearly 2^32
3269
- * allocated RSS contexts, which seems unlikely.
3270
- */
3271
- if (WARN_ON_ONCE(!id))
3272
- return NULL;
3273
- }
3274
-
3275
- /* Create the new entry */
3276
- new = kmalloc(sizeof(struct efx_rss_context), GFP_KERNEL);
3277
- if (!new)
3278
- return NULL;
3279
- new->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
3280
- new->rx_hash_udp_4tuple = false;
3281
-
3282
- /* Insert the new entry into the gap */
3283
- new->user_id = id;
3284
- list_add_tail(&new->list, &ctx->list);
3285
- return new;
3286
-}
3287
-
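efx_alloc_rss_context_entry() above keeps the RSS contexts sorted by user_id and walks the list looking for the first gap in the numbering, starting at 1 (0 is reserved for the master context); list_add_tail(&new->list, &ctx->list) then inserts the new entry just before the node the scan stopped at, so the list stays sorted. A standalone sketch of the first-gap scan over a sorted set of IDs (not driver code; the wrap-around check is omitted):

#include <stdio.h>

static unsigned int first_free_id(const unsigned int *used, int n)
{
	unsigned int id = 1;	/* 0 is reserved */
	int i;

	for (i = 0; i < n; i++) {
		if (used[i] != id)
			break;	/* found a gap before used[i] */
		id++;
	}
	return id;
}

int main(void)
{
	unsigned int ids[] = { 1, 2, 4, 5 };	/* sorted; 3 was freed */

	/* prints 3: a new context slots into the gap */
	printf("next id = %u\n", first_free_id(ids, 4));
	return 0;
}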
3288
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
3289
-{
3290
- struct list_head *head = &efx->rss_context.list;
3291
- struct efx_rss_context *ctx;
3292
-
3293
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
3294
-
3295
- list_for_each_entry(ctx, head, list)
3296
- if (ctx->user_id == id)
3297
- return ctx;
3298
- return NULL;
3299
-}
3300
-
3301
-void efx_free_rss_context_entry(struct efx_rss_context *ctx)
3302
-{
3303
- list_del(&ctx->list);
3304
- kfree(ctx);
3305834 }
3306835
3307836 /**************************************************************************
....@@ -3318,8 +847,8 @@
3318847 /* Flush reset_work. It can no longer be scheduled since we
3319848 * are not READY.
3320849 */
3321
- BUG_ON(efx->state == STATE_READY);
3322
- cancel_work_sync(&efx->reset_work);
850
+ WARN_ON(efx_net_active(efx->state));
851
+ efx_flush_reset_workqueue(efx);
3323852
3324853 efx_disable_interrupts(efx);
3325854 efx_clear_interrupt_affinity(efx);
....@@ -3512,18 +1041,18 @@
35121041 }
35131042
35141043 /* Determine netdevice features */
3515
- net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
3516
- NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
3517
- if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
3518
- net_dev->features |= NETIF_F_TSO6;
3519
- /* Check whether device supports TSO */
3520
- if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
3521
- net_dev->features &= ~NETIF_F_ALL_TSO;
1044
+ net_dev->features |= efx->type->offload_features;
1045
+
1046
+ /* Add TSO features */
1047
+ if (efx->type->tso_versions && efx->type->tso_versions(efx))
1048
+ net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
1049
+
35221050 /* Mask for features that also apply to VLAN devices */
35231051 net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
35241052 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
35251053 NETIF_F_RXCSUM);
35261054
1055
+ /* Determine user configurable features */
35271056 net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
35281057
35291058 /* Disable receiving frames with bad FCS, by default. */
....@@ -3582,7 +1111,8 @@
35821111 efx_probe_vpd_strings(efx);
35831112
35841113 /* Set up basic I/O (BAR mappings etc) */
3585
- rc = efx_init_io(efx);
1114
+ rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask,
1115
+ efx->type->mem_map_size(efx));
35861116 if (rc)
35871117 goto fail2;
35881118
....@@ -3618,11 +1148,7 @@
36181148 netif_warn(efx, probe, efx->net_dev,
36191149 "failed to create MTDs (%d)\n", rc);
36201150
3621
- rc = pci_enable_pcie_error_reporting(pci_dev);
3622
- if (rc && rc != -EINVAL)
3623
- netif_notice(efx, probe, efx->net_dev,
3624
- "PCIE error reporting unavailable (%d).\n",
3625
- rc);
1151
+ (void)pci_enable_pcie_error_reporting(pci_dev);
36261152
36271153 if (efx->type->udp_tnl_push_ports)
36281154 efx->type->udp_tnl_push_ports(efx);
....@@ -3662,17 +1188,17 @@
36621188
36631189 static int efx_pm_freeze(struct device *dev)
36641190 {
3665
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
1191
+ struct efx_nic *efx = dev_get_drvdata(dev);
36661192
36671193 rtnl_lock();
36681194
3669
- if (efx->state != STATE_DISABLED) {
3670
- efx->state = STATE_UNINIT;
3671
-
1195
+ if (efx_net_active(efx->state)) {
36721196 efx_device_detach_sync(efx);
36731197
36741198 efx_stop_all(efx);
36751199 efx_disable_interrupts(efx);
1200
+
1201
+ efx->state = efx_freeze(efx->state);
36761202 }
36771203
36781204 rtnl_unlock();
....@@ -3683,24 +1209,24 @@
36831209 static int efx_pm_thaw(struct device *dev)
36841210 {
36851211 int rc;
3686
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
1212
+ struct efx_nic *efx = dev_get_drvdata(dev);
36871213
36881214 rtnl_lock();
36891215
3690
- if (efx->state != STATE_DISABLED) {
1216
+ if (efx_frozen(efx->state)) {
36911217 rc = efx_enable_interrupts(efx);
36921218 if (rc)
36931219 goto fail;
36941220
36951221 mutex_lock(&efx->mac_lock);
3696
- efx->phy_op->reconfigure(efx);
1222
+ efx_mcdi_port_reconfigure(efx);
36971223 mutex_unlock(&efx->mac_lock);
36981224
36991225 efx_start_all(efx);
37001226
37011227 efx_device_attach_if_not_resetting(efx);
37021228
3703
- efx->state = STATE_READY;
1229
+ efx->state = efx_thaw(efx->state);
37041230
37051231 efx->type->resume_wol(efx);
37061232 }
....@@ -3708,7 +1234,7 @@
37081234 rtnl_unlock();
37091235
37101236 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
3711
- queue_work(reset_workqueue, &efx->reset_work);
1237
+ efx_queue_reset_work(efx);
37121238
37131239 return 0;
37141240
....@@ -3778,105 +1304,6 @@
37781304 .restore = efx_pm_resume,
37791305 };
37801306
3781
-/* A PCI error affecting this device was detected.
3782
- * At this point MMIO and DMA may be disabled.
3783
- * Stop the software path and request a slot reset.
3784
- */
3785
-static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
3786
- enum pci_channel_state state)
3787
-{
3788
- pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3789
- struct efx_nic *efx = pci_get_drvdata(pdev);
3790
-
3791
- if (state == pci_channel_io_perm_failure)
3792
- return PCI_ERS_RESULT_DISCONNECT;
3793
-
3794
- rtnl_lock();
3795
-
3796
- if (efx->state != STATE_DISABLED) {
3797
- efx->state = STATE_RECOVERY;
3798
- efx->reset_pending = 0;
3799
-
3800
- efx_device_detach_sync(efx);
3801
-
3802
- efx_stop_all(efx);
3803
- efx_disable_interrupts(efx);
3804
-
3805
- status = PCI_ERS_RESULT_NEED_RESET;
3806
- } else {
3807
- /* If the interface is disabled we don't want to do anything
3808
- * with it.
3809
- */
3810
- status = PCI_ERS_RESULT_RECOVERED;
3811
- }
3812
-
3813
- rtnl_unlock();
3814
-
3815
- pci_disable_device(pdev);
3816
-
3817
- return status;
3818
-}
3819
-
3820
-/* Fake a successful reset, which will be performed later in efx_io_resume. */
3821
-static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
3822
-{
3823
- struct efx_nic *efx = pci_get_drvdata(pdev);
3824
- pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3825
- int rc;
3826
-
3827
- if (pci_enable_device(pdev)) {
3828
- netif_err(efx, hw, efx->net_dev,
3829
- "Cannot re-enable PCI device after reset.\n");
3830
- status = PCI_ERS_RESULT_DISCONNECT;
3831
- }
3832
-
3833
- rc = pci_cleanup_aer_uncorrect_error_status(pdev);
3834
- if (rc) {
3835
- netif_err(efx, hw, efx->net_dev,
3836
- "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
3837
- /* Non-fatal error. Continue. */
3838
- }
3839
-
3840
- return status;
3841
-}
3842
-
3843
-/* Perform the actual reset and resume I/O operations. */
3844
-static void efx_io_resume(struct pci_dev *pdev)
3845
-{
3846
- struct efx_nic *efx = pci_get_drvdata(pdev);
3847
- int rc;
3848
-
3849
- rtnl_lock();
3850
-
3851
- if (efx->state == STATE_DISABLED)
3852
- goto out;
3853
-
3854
- rc = efx_reset(efx, RESET_TYPE_ALL);
3855
- if (rc) {
3856
- netif_err(efx, hw, efx->net_dev,
3857
- "efx_reset failed after PCI error (%d)\n", rc);
3858
- } else {
3859
- efx->state = STATE_READY;
3860
- netif_dbg(efx, hw, efx->net_dev,
3861
- "Done resetting and resuming IO after PCI error.\n");
3862
- }
3863
-
3864
-out:
3865
- rtnl_unlock();
3866
-}
3867
-
3868
-/* For simplicity and reliability, we always require a slot reset and try to
3869
- * reset the hardware when a pci error affecting the device is detected.
3870
- * We leave both the link_reset and mmio_enabled callback unimplemented:
3871
- * with our request for slot reset the mmio_enabled callback will never be
3872
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
3873
- */
3874
-static const struct pci_error_handlers efx_err_handlers = {
3875
- .error_detected = efx_io_error_detected,
3876
- .slot_reset = efx_io_slot_reset,
3877
- .resume = efx_io_resume,
3878
-};
3879
-
38801307 static struct pci_driver efx_pci_driver = {
38811308 .name = KBUILD_MODNAME,
38821309 .id_table = efx_pci_table,
....@@ -3895,15 +1322,11 @@
38951322 *
38961323 *************************************************************************/
38971324
3898
-module_param(interrupt_mode, uint, 0444);
3899
-MODULE_PARM_DESC(interrupt_mode,
3900
- "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
3901
-
39021325 static int __init efx_init_module(void)
39031326 {
39041327 int rc;
39051328
3906
- printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
1329
+ printk(KERN_INFO "Solarflare NET driver\n");
39071330
39081331 rc = register_netdevice_notifier(&efx_netdev_notifier);
39091332 if (rc)
....@@ -3915,20 +1338,24 @@
39151338 goto err_sriov;
39161339 #endif
39171340
3918
- reset_workqueue = create_singlethread_workqueue("sfc_reset");
3919
- if (!reset_workqueue) {
3920
- rc = -ENOMEM;
1341
+ rc = efx_create_reset_workqueue();
1342
+ if (rc)
39211343 goto err_reset;
3922
- }
39231344
39241345 rc = pci_register_driver(&efx_pci_driver);
39251346 if (rc < 0)
39261347 goto err_pci;
39271348
1349
+ rc = pci_register_driver(&ef100_pci_driver);
1350
+ if (rc < 0)
1351
+ goto err_pci_ef100;
1352
+
39281353 return 0;
39291354
1355
+ err_pci_ef100:
1356
+ pci_unregister_driver(&efx_pci_driver);
39301357 err_pci:
3931
- destroy_workqueue(reset_workqueue);
1358
+ efx_destroy_reset_workqueue();
39321359 err_reset:
39331360 #ifdef CONFIG_SFC_SRIOV
39341361 efx_fini_sriov();
....@@ -3943,8 +1370,9 @@
39431370 {
39441371 printk(KERN_INFO "Solarflare NET driver unloading\n");
39451372
1373
+ pci_unregister_driver(&ef100_pci_driver);
39461374 pci_unregister_driver(&efx_pci_driver);
3947
- destroy_workqueue(reset_workqueue);
1375
+ efx_destroy_reset_workqueue();
39481376 #ifdef CONFIG_SFC_SRIOV
39491377 efx_fini_sriov();
39501378 #endif
....@@ -3960,4 +1388,3 @@
39601388 MODULE_DESCRIPTION("Solarflare network driver");
39611389 MODULE_LICENSE("GPL");
39621390 MODULE_DEVICE_TABLE(pci, efx_pci_table);
3963
-MODULE_VERSION(EFX_DRIVER_VERSION);