forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1,10 +1,8 @@
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
 *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
 */

 /* File aq_nic.c: Definition of common code for NIC. */
@@ -14,6 +12,11 @@
 #include "aq_vec.h"
 #include "aq_hw.h"
 #include "aq_pci_func.h"
+#include "aq_macsec.h"
+#include "aq_main.h"
+#include "aq_phy.h"
+#include "aq_ptp.h"
+#include "aq_filters.h"

 #include <linux/moduleparam.h>
 #include <linux/netdevice.h>
@@ -23,6 +26,7 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <net/ip.h>
+#include <net/pkt_cls.h>

 static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
 module_param_named(aq_itr, aq_itr, uint, 0644);
@@ -40,10 +44,6 @@

 static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
 {
- struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
- struct aq_rss_parameters *rss_params = &cfg->aq_rss;
- int i = 0;
-
 static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
@@ -51,6 +51,11 @@
 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
 };
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ struct aq_rss_parameters *rss_params;
+ int i = 0;
+
+ rss_params = &cfg->aq_rss;

 rss_params->hash_secret_key_size = sizeof(rss_key);
 memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
@@ -60,10 +65,38 @@
 rss_params->indirection_table[i] = i & (num_rss_queues - 1);
 }

+/* Recalculate the number of vectors */
+static void aq_nic_cfg_update_num_vecs(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
+ cfg->vecs = min(cfg->vecs, num_online_cpus());
+ if (self->irqvecs > AQ_HW_SERVICE_IRQS)
+ cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
+ /* cfg->vecs should be power of 2 for RSS */
+ cfg->vecs = rounddown_pow_of_two(cfg->vecs);
+
+ if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
+ if (cfg->tcs > 2)
+ cfg->vecs = min(cfg->vecs, 4U);
+ }
+
+ if (cfg->vecs <= 4)
+ cfg->tc_mode = AQ_TC_MODE_8TCS;
+ else
+ cfg->tc_mode = AQ_TC_MODE_4TCS;
+
+ /*rss rings */
+ cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
+ aq_nic_rss_init(self, cfg->num_rss_queues);
+}
+
 /* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
 void aq_nic_cfg_start(struct aq_nic_s *self)
 {
 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ int i;

 cfg->tcs = AQ_CFG_TCS_DEF;

@@ -73,40 +106,24 @@
 cfg->tx_itr = aq_itr_tx;
 cfg->rx_itr = aq_itr_rx;

+ cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
 cfg->is_rss = AQ_CFG_IS_RSS_DEF;
- cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
 cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
- cfg->flow_control = AQ_CFG_FC_MODE;
+ cfg->fc.req = AQ_CFG_FC_MODE;
+ cfg->wol = AQ_CFG_WOL_MODES;

 cfg->mtu = AQ_CFG_MTU_DEF;
 cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
 cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

 cfg->is_lro = AQ_CFG_IS_LRO_DEF;
-
- cfg->vlan_id = 0U;
-
- aq_nic_rss_init(self, cfg->num_rss_queues);
+ cfg->is_ptp = true;

 /*descriptors */
 cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
 cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

- /*rss rings */
- cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
- cfg->vecs = min(cfg->vecs, num_online_cpus());
- cfg->vecs = min(cfg->vecs, self->irqvecs);
- /* cfg->vecs should be power of 2 for RSS */
- if (cfg->vecs >= 8U)
- cfg->vecs = 8U;
- else if (cfg->vecs >= 4U)
- cfg->vecs = 4U;
- else if (cfg->vecs >= 2U)
- cfg->vecs = 2U;
- else
- cfg->vecs = 1U;
-
- cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
+ aq_nic_cfg_update_num_vecs(self);

 cfg->irq_type = aq_pci_func_get_irq_type(self);

@@ -117,22 +134,56 @@
 cfg->vecs = 1U;
 }

+ /* Check if we have enough vectors allocated for
+ * link status IRQ. If no - we'll know link state from
+ * slower service task.
+ */
+ if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
+ cfg->link_irq_vec = cfg->vecs;
+ else
+ cfg->link_irq_vec = 0;
+
 cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
- cfg->hw_features = cfg->aq_hw_caps->hw_features;
+ cfg->features = cfg->aq_hw_caps->hw_features;
+ cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
+ cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
+ cfg->is_vlan_force_promisc = true;
+
+ for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
+ cfg->prio_tc_map[i] = cfg->tcs * i / 8;
 }

 static int aq_nic_update_link_status(struct aq_nic_s *self)
 {
 int err = self->aq_fw_ops->update_link_status(self->aq_hw);
+ u32 fc = 0;

 if (err)
 return err;

+ if (self->aq_fw_ops->get_flow_control)
+ self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
+ self->aq_nic_cfg.fc.cur = fc;
+
 if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
- pr_info("%s: link change old %d new %d\n",
- AQ_CFG_DRV_NAME, self->link_status.mbps,
- self->aq_hw->aq_link_status.mbps);
+ netdev_info(self->ndev, "%s: link change old %d new %d\n",
+ AQ_CFG_DRV_NAME, self->link_status.mbps,
+ self->aq_hw->aq_link_status.mbps);
 aq_nic_update_interrupt_moderation_settings(self);
+
+ if (self->aq_ptp) {
+ aq_ptp_clock_init(self);
+ aq_ptp_tm_offset_set(self,
+ self->aq_hw->aq_link_status.mbps);
+ aq_ptp_link_change(self);
+ }
+
+ /* Driver has to update flow control settings on RX block
+ * on any link event.
+ * We should query FW whether it negotiated FC.
+ */
+ if (self->aq_hw_ops->hw_set_fc)
+ self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
 }

 self->link_status = self->aq_hw->aq_link_status;
@@ -142,6 +193,12 @@
 aq_utils_obj_clear(&self->flags,
 AQ_NIC_LINK_DOWN);
 netif_carrier_on(self->ndev);
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_enable(self);
+#endif
+ if (self->aq_hw_ops->hw_tc_rate_limit_set)
+ self->aq_hw_ops->hw_tc_rate_limit_set(self->aq_hw);
+
 netif_tx_wake_all_queues(self->ndev);
 }
 if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
@@ -149,47 +206,94 @@
 netif_tx_disable(self->ndev);
 aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
 }
+
 return 0;
+}
+
+static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
+{
+ struct aq_nic_s *self = private;
+
+ if (!self)
+ return IRQ_NONE;
+
+ aq_nic_update_link_status(self);
+
+ self->aq_hw_ops->hw_irq_enable(self->aq_hw,
+ BIT(self->aq_nic_cfg.link_irq_vec));
+
+ return IRQ_HANDLED;
+}
+
+static void aq_nic_service_task(struct work_struct *work)
+{
+ struct aq_nic_s *self = container_of(work, struct aq_nic_s,
+ service_task);
+ int err;
+
+ aq_ptp_service_task(self);
+
+ if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
+ return;
+
+ err = aq_nic_update_link_status(self);
+ if (err)
+ return;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_work(self);
+#endif
+
+ mutex_lock(&self->fwreq_mutex);
+ if (self->aq_fw_ops->update_stats)
+ self->aq_fw_ops->update_stats(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
+
+ aq_nic_update_ndev_stats(self);
 }

 static void aq_nic_service_timer_cb(struct timer_list *t)
 {
 struct aq_nic_s *self = from_timer(self, t, service_timer);
- int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
- int err = 0;

- if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
- goto err_exit;
+ mod_timer(&self->service_timer,
+ jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

- err = aq_nic_update_link_status(self);
- if (err)
- goto err_exit;
-
- if (self->aq_fw_ops->update_stats)
- self->aq_fw_ops->update_stats(self->aq_hw);
-
- aq_nic_update_ndev_stats(self);
-
- /* If no link - use faster timer rate to detect link up asap */
- if (!netif_carrier_ok(self->ndev))
- ctimer = max(ctimer / 2, 1);
-
-err_exit:
- mod_timer(&self->service_timer, jiffies + ctimer);
+ aq_ndev_schedule_work(&self->service_task);
 }

 static void aq_nic_polling_timer_cb(struct timer_list *t)
 {
 struct aq_nic_s *self = from_timer(self, t, polling_timer);
- struct aq_vec_s *aq_vec = NULL;
 unsigned int i = 0U;

- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_isr(i, (void *)aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_isr(i, (void *)self->aq_vec[i]);

 mod_timer(&self->polling_timer, jiffies +
- AQ_CFG_POLLING_TIMER_INTERVAL);
+ AQ_CFG_POLLING_TIMER_INTERVAL);
+}
+
+static int aq_nic_hw_prepare(struct aq_nic_s *self)
+{
+ int err = 0;
+
+ err = self->aq_hw_ops->hw_soft_reset(self->aq_hw);
+ if (err)
+ goto exit;
+
+ err = self->aq_hw_ops->hw_prepare(self->aq_hw, &self->aq_fw_ops);
+
+exit:
+ return err;
+}
+
+static bool aq_nic_is_valid_ether_addr(const u8 *addr)
+{
+ /* Some engineering samples of Aquantia NICs are provisioned with a
+ * partially populated MAC, which is still invalid.
+ */
+ return !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0);
 }

 int aq_nic_ndev_register(struct aq_nic_s *self)
@@ -201,14 +305,26 @@
 goto err_exit;
 }

- err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
+ err = aq_nic_hw_prepare(self);
 if (err)
 goto err_exit;

+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_init(self);
+#endif
+
+ mutex_lock(&self->fwreq_mutex);
 err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
 self->ndev->dev_addr);
+ mutex_unlock(&self->fwreq_mutex);
 if (err)
 goto err_exit;
+
+ if (!is_valid_ether_addr(self->ndev->dev_addr) ||
+ !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
+ netdev_warn(self->ndev, "MAC is invalid, will use random.");
+ eth_hw_addr_random(self->ndev);
+ }

 #if defined(AQ_CFG_MAC_ADDR_PERMANENT)
 {
@@ -237,6 +353,10 @@
 goto err_exit;

 err_exit:
+#if IS_ENABLED(CONFIG_MACSEC)
+ if (err)
+ aq_macsec_free(self);
+#endif
 return err;
 }

@@ -248,10 +368,13 @@
 self->ndev->hw_features |= aq_hw_caps->hw_features;
 self->ndev->features = aq_hw_caps->hw_features;
 self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
- NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO;
+ NETIF_F_RXHASH | NETIF_F_SG |
+ NETIF_F_LRO | NETIF_F_TSO | NETIF_F_TSO6;
+ self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
 self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
 self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

+ self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
 self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
 self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;

@@ -271,22 +394,63 @@
 int aq_nic_init(struct aq_nic_s *self)
 {
 struct aq_vec_s *aq_vec = NULL;
- int err = 0;
 unsigned int i = 0U;
+ int err = 0;

 self->power_state = AQ_HW_POWER_STATE_D0;
+ mutex_lock(&self->fwreq_mutex);
 err = self->aq_hw_ops->hw_reset(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
 if (err < 0)
 goto err_exit;
+ /* Restore default settings */
+ aq_nic_set_downshift(self, self->aq_nic_cfg.downshift_counter);
+ aq_nic_set_media_detect(self, self->aq_nic_cfg.is_media_detect ?
+ AQ_HW_MEDIA_DETECT_CNT : 0);

 err = self->aq_hw_ops->hw_init(self->aq_hw,
 aq_nic_get_ndev(self)->dev_addr);
 if (err < 0)
 goto err_exit;

- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ATLANTIC) &&
+ self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
+ self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
+ err = aq_phy_init(self->aq_hw);
+
+ /* Disable the PTP on NICs where it's known to cause datapath
+ * problems.
+ * Ideally this should have been done by PHY provisioning, but
+ * many units have been shipped with enabled PTP block already.
+ */
+ if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP)
+ if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX)
+ aq_phy_disable_ptp(self->aq_hw);
+ }
+
+ for (i = 0U; i < self->aq_vecs; i++) {
+ aq_vec = self->aq_vec[i];
+ err = aq_vec_ring_alloc(aq_vec, self, i,
+ aq_nic_get_cfg(self));
+ if (err)
+ goto err_exit;
+
 aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
+ }
+
+ if (aq_nic_get_cfg(self)->is_ptp) {
+ err = aq_ptp_init(self, self->irqvecs - 1);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ptp_ring_alloc(self);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ptp_ring_init(self);
+ if (err < 0)
+ goto err_exit;
+ }

 netif_carrier_off(self->ndev);

@@ -297,26 +461,35 @@
 int aq_nic_start(struct aq_nic_s *self)
 {
 struct aq_vec_s *aq_vec = NULL;
- int err = 0;
+ struct aq_nic_cfg_s *cfg;
 unsigned int i = 0U;
+ int err = 0;
+
+ cfg = aq_nic_get_cfg(self);

 err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
- self->mc_list.ar,
- self->mc_list.count);
+ self->mc_list.ar,
+ self->mc_list.count);
 if (err < 0)
 goto err_exit;

 err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
- self->packet_filter);
+ self->packet_filter);
 if (err < 0)
 goto err_exit;

- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
 err = aq_vec_start(aq_vec);
 if (err < 0)
 goto err_exit;
 }
+
+ err = aq_ptp_ring_start(self);
+ if (err < 0)
+ goto err_exit;
+
+ aq_nic_set_loopback(self);

 err = self->aq_hw_ops->hw_start(self->aq_hw);
 if (err < 0)
@@ -325,81 +498,137 @@
 err = aq_nic_update_interrupt_moderation_settings(self);
 if (err)
 goto err_exit;
- timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
- mod_timer(&self->service_timer, jiffies +
- AQ_CFG_SERVICE_TIMER_INTERVAL);

- if (self->aq_nic_cfg.is_polling) {
+ INIT_WORK(&self->service_task, aq_nic_service_task);
+
+ timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
+ aq_nic_service_timer_cb(&self->service_timer);
+
+ if (cfg->is_polling) {
 timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
 mod_timer(&self->polling_timer, jiffies +
 AQ_CFG_POLLING_TIMER_INTERVAL);
 } else {
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
- err = aq_pci_func_alloc_irq(self, i,
- self->ndev->name, aq_vec,
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
+ err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
+ aq_vec_isr, aq_vec,
 aq_vec_get_affinity_mask(aq_vec));
 if (err < 0)
 goto err_exit;
 }

+ err = aq_ptp_irq_alloc(self);
+ if (err < 0)
+ goto err_exit;
+
+ if (cfg->link_irq_vec) {
+ int irqvec = pci_irq_vector(self->pdev,
+ cfg->link_irq_vec);
+ err = request_threaded_irq(irqvec, NULL,
+ aq_linkstate_threaded_isr,
+ IRQF_SHARED | IRQF_ONESHOT,
+ self->ndev->name, self);
+ if (err < 0)
+ goto err_exit;
+ self->msix_entry_mask |= (1 << cfg->link_irq_vec);
+ }
+
 err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
- AQ_CFG_IRQ_MASK);
+ AQ_CFG_IRQ_MASK);
 if (err < 0)
 goto err_exit;
 }

- err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
+ err = netif_set_real_num_tx_queues(self->ndev,
+ self->aq_vecs * cfg->tcs);
 if (err < 0)
 goto err_exit;

- err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
+ err = netif_set_real_num_rx_queues(self->ndev,
+ self->aq_vecs * cfg->tcs);
 if (err < 0)
 goto err_exit;

+ for (i = 0; i < cfg->tcs; i++) {
+ u16 offset = self->aq_vecs * i;
+
+ netdev_set_tc_queue(self->ndev, i, self->aq_vecs, offset);
+ }
 netif_tx_start_all_queues(self->ndev);

 err_exit:
 return err;
 }

-static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
- struct sk_buff *skb,
- struct aq_ring_s *ring)
+unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
+ struct aq_ring_s *ring)
 {
- unsigned int ret = 0U;
 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int frag_count = 0U;
- unsigned int dx = ring->sw_tail;
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
+ struct device *dev = aq_nic_get_dev(self);
 struct aq_ring_buff_s *first = NULL;
- struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
+ u8 ipver = ip_hdr(skb)->version;
+ struct aq_ring_buff_s *dx_buff;
+ bool need_context_tag = false;
+ unsigned int frag_count = 0U;
+ unsigned int ret = 0U;
+ unsigned int dx;
+ u8 l4proto = 0;
+
+ if (ipver == 4)
+ l4proto = ip_hdr(skb)->protocol;
+ else if (ipver == 6)
+ l4proto = ipv6_hdr(skb)->nexthdr;
+
+ dx = ring->sw_tail;
+ dx_buff = &ring->buff_ring[dx];
+ dx_buff->flags = 0U;

 if (unlikely(skb_is_gso(skb))) {
- dx_buff->flags = 0U;
+ dx_buff->mss = skb_shinfo(skb)->gso_size;
+ if (l4proto == IPPROTO_TCP) {
+ dx_buff->is_gso_tcp = 1U;
+ dx_buff->len_l4 = tcp_hdrlen(skb);
+ } else if (l4proto == IPPROTO_UDP) {
+ dx_buff->is_gso_udp = 1U;
+ dx_buff->len_l4 = sizeof(struct udphdr);
+ /* UDP GSO Hardware does not replace packet length. */
+ udp_hdr(skb)->len = htons(dx_buff->mss +
+ dx_buff->len_l4);
+ } else {
+ WARN_ONCE(true, "Bad GSO mode");
+ goto exit;
+ }
 dx_buff->len_pkt = skb->len;
 dx_buff->len_l2 = ETH_HLEN;
- dx_buff->len_l3 = ip_hdrlen(skb);
- dx_buff->len_l4 = tcp_hdrlen(skb);
- dx_buff->mss = skb_shinfo(skb)->gso_size;
- dx_buff->is_txc = 1U;
+ dx_buff->len_l3 = skb_network_header_len(skb);
 dx_buff->eop_index = 0xffffU;
+ dx_buff->is_ipv6 = (ipver == 6);
+ need_context_tag = true;
+ }

- dx_buff->is_ipv6 =
- (ip_hdr(skb)->version == 6) ? 1U : 0U;
+ if (cfg->is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
+ dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
+ dx_buff->len_pkt = skb->len;
+ dx_buff->is_vlan = 1U;
+ need_context_tag = true;
+ }

+ if (need_context_tag) {
 dx = aq_ring_next_dx(ring, dx);
 dx_buff = &ring->buff_ring[dx];
+ dx_buff->flags = 0U;
 ++ret;
 }

- dx_buff->flags = 0U;
 dx_buff->len = skb_headlen(skb);
- dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
+ dx_buff->pa = dma_map_single(dev,
 skb->data,
 dx_buff->len,
 DMA_TO_DEVICE);

- if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
+ if (unlikely(dma_mapping_error(dev, dx_buff->pa))) {
 ret = 0;
 goto exit;
 }
@@ -411,24 +640,9 @@
 ++ret;

 if (skb->ip_summed == CHECKSUM_PARTIAL) {
- dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
- 1U : 0U;
-
- if (ip_hdr(skb)->version == 4) {
- dx_buff->is_tcp_cso =
- (ip_hdr(skb)->protocol == IPPROTO_TCP) ?
- 1U : 0U;
- dx_buff->is_udp_cso =
- (ip_hdr(skb)->protocol == IPPROTO_UDP) ?
- 1U : 0U;
- } else if (ip_hdr(skb)->version == 6) {
- dx_buff->is_tcp_cso =
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
- 1U : 0U;
- dx_buff->is_udp_cso =
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
- 1U : 0U;
- }
+ dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
+ dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
+ dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
 }

 for (; nr_frags--; ++frag_count) {
@@ -446,13 +660,13 @@
 else
 buff_size = frag_len;

- frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
+ frag_pa = skb_frag_dma_map(dev,
 frag,
 buff_offset,
 buff_size,
 DMA_TO_DEVICE);

- if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
+ if (unlikely(dma_mapping_error(dev,
 frag_pa)))
 goto mapping_error;

@@ -483,14 +697,15 @@
 --ret, dx = aq_ring_next_dx(ring, dx)) {
 dx_buff = &ring->buff_ring[dx];

- if (!dx_buff->is_txc && dx_buff->pa) {
+ if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
+ !dx_buff->is_vlan && dx_buff->pa) {
 if (unlikely(dx_buff->is_sop)) {
- dma_unmap_single(aq_nic_get_dev(self),
+ dma_unmap_single(dev,
 dx_buff->pa,
 dx_buff->len,
 DMA_TO_DEVICE);
 } else {
- dma_unmap_page(aq_nic_get_dev(self),
+ dma_unmap_page(dev,
 dx_buff->pa,
 dx_buff->len,
 DMA_TO_DEVICE);
@@ -504,15 +719,16 @@

 int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
 {
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
+ unsigned int vec = skb->queue_mapping % cfg->vecs;
+ unsigned int tc = skb->queue_mapping / cfg->vecs;
 struct aq_ring_s *ring = NULL;
 unsigned int frags = 0U;
- unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
- unsigned int tc = 0U;
 int err = NETDEV_TX_OK;

 frags = skb_shinfo(skb)->nr_frags + 1;

- ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];
+ ring = self->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(cfg, tc, vec)];

 if (frags > AQ_CFG_SKB_FRAGS_MAX) {
 dev_kfree_skb_any(skb);
@@ -521,8 +737,14 @@

 aq_ring_update_queue_state(ring);

+ if (cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
+ err = NETDEV_TX_BUSY;
+ goto err_exit;
+ }
+
 /* Above status update may stop the queue. Check this. */
- if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
+ if (__netif_subqueue_stopped(self->ndev,
+ AQ_NIC_RING2QMAP(self, ring->idx))) {
 err = NETDEV_TX_BUSY;
 goto err_exit;
 }
@@ -561,9 +783,12 @@

 int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
 {
- unsigned int packet_filter = self->packet_filter;
+ const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ unsigned int packet_filter = ndev->flags;
 struct netdev_hw_addr *ha = NULL;
 unsigned int i = 0U;
+ int err = 0;

 self->mc_list.count = 0;
 if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
@@ -571,30 +796,30 @@
 } else {
 netdev_for_each_uc_addr(ha, ndev) {
 ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-
- if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
- break;
 }
 }

- if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
- packet_filter |= IFF_ALLMULTI;
- } else {
- netdev_for_each_mc_addr(ha, ndev) {
- ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-
- if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
- break;
+ cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
+ if (cfg->is_mc_list_enabled) {
+ if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+ packet_filter |= IFF_ALLMULTI;
+ } else {
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(self->mc_list.ar[i++],
+ ha->addr);
+ }
 }
 }

 if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
- packet_filter |= IFF_MULTICAST;
 self->mc_list.count = i;
- self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
- self->mc_list.ar,
- self->mc_list.count);
+ err = hw_ops->hw_multicast_list_set(self->aq_hw,
+ self->mc_list.ar,
+ self->mc_list.count);
+ if (err < 0)
+ return err;
 }
+
 return aq_nic_set_packet_filter(self, packet_filter);
 }

@@ -643,12 +868,19 @@
 return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
 }

-void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
+u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
 {
- unsigned int i = 0U;
+ struct aq_stats_s *stats;
 unsigned int count = 0U;
- struct aq_vec_s *aq_vec = NULL;
- struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
+ unsigned int i = 0U;
+ unsigned int tc;
+
+ if (self->aq_fw_ops->update_stats) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->update_stats(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
+ }
+ stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

 if (!stats)
 goto err_exit;
@@ -668,8 +900,14 @@
 data[++i] = stats->mbtc;
 data[++i] = stats->bbrc;
 data[++i] = stats->bbtc;
- data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
- data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+ if (stats->brc)
+ data[++i] = stats->brc;
+ else
+ data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+ if (stats->btc)
+ data[++i] = stats->btc;
+ else
+ data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
 data[++i] = stats->dma_pkt_rc;
 data[++i] = stats->dma_pkt_tc;
 data[++i] = stats->dma_oct_rc;
@@ -680,25 +918,32 @@

 data += i;

- for (i = 0U, aq_vec = self->aq_vec[0];
- aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
- data += count;
- aq_vec_get_sw_stats(aq_vec, data, &count);
+ for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ if (!self->aq_vec[i])
+ break;
+ data += count;
+ count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
+ }
 }

-err_exit:;
+ data += count;
+
+err_exit:
+ return data;
 }

 static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
 {
- struct net_device *ndev = self->ndev;
 struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
+ struct net_device *ndev = self->ndev;

- ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
- ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
+ ndev->stats.rx_packets = stats->dma_pkt_rc;
+ ndev->stats.rx_bytes = stats->dma_oct_rc;
 ndev->stats.rx_errors = stats->erpr;
- ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc;
- ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc;
+ ndev->stats.rx_dropped = stats->dpc;
+ ndev->stats.tx_packets = stats->dma_pkt_tc;
+ ndev->stats.tx_bytes = stats->dma_oct_tc;
 ndev->stats.tx_errors = stats->erpt;
 ndev->stats.multicast = stats->mprc;
 }
@@ -706,12 +951,17 @@
 void aq_nic_get_link_ksettings(struct aq_nic_s *self,
 struct ethtool_link_ksettings *cmd)
 {
+ u32 lp_link_speed_msk;
+
 if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
 cmd->base.port = PORT_FIBRE;
 else
 cmd->base.port = PORT_TP;
- /* This driver supports only 10G capable adapters, so DUPLEX_FULL */
- cmd->base.duplex = DUPLEX_FULL;
+
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ if (self->link_status.mbps)
+ cmd->base.duplex = self->link_status.full_duplex ?
+ DUPLEX_FULL : DUPLEX_HALF;
 cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

 ethtool_link_ksettings_zero_link_mode(cmd, supported);
@@ -724,7 +974,7 @@
 ethtool_link_ksettings_add_link_mode(cmd, supported,
 5000baseT_Full);

- if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2G5)
 ethtool_link_ksettings_add_link_mode(cmd, supported,
 2500baseT_Full);

@@ -732,13 +982,32 @@
 ethtool_link_ksettings_add_link_mode(cmd, supported,
 1000baseT_Full);

+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseT_Half);
+
 if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
 ethtool_link_ksettings_add_link_mode(cmd, supported,
 100baseT_Full);

- if (self->aq_nic_cfg.aq_hw_caps->flow_control)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 100baseT_Half);
+
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Full);
+
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Half);
+
+ if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
 ethtool_link_ksettings_add_link_mode(cmd, supported,
 Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ Asym_Pause);
+ }

 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

@@ -752,31 +1021,49 @@
 if (self->aq_nic_cfg.is_autoneg)
 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

- if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
 ethtool_link_ksettings_add_link_mode(cmd, advertising,
 10000baseT_Full);

- if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
 ethtool_link_ksettings_add_link_mode(cmd, advertising,
 5000baseT_Full);

- if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5)
 ethtool_link_ksettings_add_link_mode(cmd, advertising,
 2500baseT_Full);

- if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
 ethtool_link_ksettings_add_link_mode(cmd, advertising,
 1000baseT_Full);

- if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Half);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
 ethtool_link_ksettings_add_link_mode(cmd, advertising,
 100baseT_Full);

- if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Half);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Half);
+
+ if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
 ethtool_link_ksettings_add_link_mode(cmd, advertising,
 Pause);

- if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
+ /* Asym is when either RX or TX, but not both */
+ if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
+ !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
 ethtool_link_ksettings_add_link_mode(cmd, advertising,
 Asym_Pause);

@@ -784,32 +1071,88 @@
 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
 else
 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
+ lp_link_speed_msk = self->aq_hw->aq_link_status.lp_link_speed_msk;
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_10G)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 10000baseT_Full);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_5G)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 5000baseT_Full);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_2G5)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 2500baseT_Full);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_1G)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 1000baseT_Full);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_1G_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 1000baseT_Half);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_100M)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 100baseT_Full);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_100M_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 100baseT_Half);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_10M)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 10baseT_Full);
+
+ if (lp_link_speed_msk & AQ_NIC_RATE_10M_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ 10baseT_Half);
+
+ if (self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX)
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ Pause);
+ if (!!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_TX) ^
+ !!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX))
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+ Asym_Pause);
 }

 int aq_nic_set_link_ksettings(struct aq_nic_s *self,
 const struct ethtool_link_ksettings *cmd)
 {
- u32 speed = 0U;
+ int fduplex = (cmd->base.duplex == DUPLEX_FULL);
+ u32 speed = cmd->base.speed;
 u32 rate = 0U;
 int err = 0;
+
+ if (!fduplex && speed > SPEED_1000) {
+ err = -EINVAL;
+ goto err_exit;
+ }

 if (cmd->base.autoneg == AUTONEG_ENABLE) {
 rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
 self->aq_nic_cfg.is_autoneg = true;
 } else {
- speed = cmd->base.speed;
-
 switch (speed) {
+ case SPEED_10:
+ rate = fduplex ? AQ_NIC_RATE_10M : AQ_NIC_RATE_10M_HALF;
+ break;
+
 case SPEED_100:
- rate = AQ_NIC_RATE_100M;
+ rate = fduplex ? AQ_NIC_RATE_100M
+ : AQ_NIC_RATE_100M_HALF;
 break;

 case SPEED_1000:
- rate = AQ_NIC_RATE_1G;
+ rate = fduplex ? AQ_NIC_RATE_1G : AQ_NIC_RATE_1G_HALF;
 break;

 case SPEED_2500:
- rate = AQ_NIC_RATE_2GS;
+ rate = AQ_NIC_RATE_2G5;
 break;

 case SPEED_5000:
@@ -823,7 +1166,6 @@
 default:
 err = -1;
 goto err_exit;
- break;
 }
 if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
 err = -1;
@@ -833,7 +1175,9 @@
 self->aq_nic_cfg.is_autoneg = false;
 }

+ mutex_lock(&self->fwreq_mutex);
 err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
+ mutex_unlock(&self->fwreq_mutex);
 if (err < 0)
 goto err_exit;

@@ -850,22 +1194,56 @@

 u32 aq_nic_get_fw_version(struct aq_nic_s *self)
 {
- u32 fw_version = 0U;
+ return self->aq_hw_ops->hw_get_fw_version(self->aq_hw);
+}

- self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);
+int aq_nic_set_loopback(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

- return fw_version;
+ if (!self->aq_hw_ops->hw_set_loopback ||
+ !self->aq_fw_ops->set_phyloopback)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_DMA_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_DMA_SYS)));
+
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PKT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PKT_SYS)));
+
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_DMA_NET,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_DMA_NET)));
+
+ self->aq_fw_ops->set_phyloopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PHYINT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));
+
+ self->aq_fw_ops->set_phyloopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PHYEXT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
+ mutex_unlock(&self->fwreq_mutex);
+
+ return 0;
 }

 int aq_nic_stop(struct aq_nic_s *self)
 {
- struct aq_vec_s *aq_vec = NULL;
 unsigned int i = 0U;

 netif_tx_disable(self->ndev);
 netif_carrier_off(self->ndev);

 del_timer_sync(&self->service_timer);
+ cancel_work_sync(&self->service_task);

 self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

@@ -874,14 +1252,30 @@
 else
 aq_pci_func_free_irqs(self);

- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_stop(aq_vec);
+ aq_ptp_irq_free(self);
+
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_stop(self->aq_vec[i]);
+
+ aq_ptp_ring_stop(self);

 return self->aq_hw_ops->hw_stop(self->aq_hw);
 }

-void aq_nic_deinit(struct aq_nic_s *self)
+void aq_nic_set_power(struct aq_nic_s *self)
+{
+ if (self->power_state != AQ_HW_POWER_STATE_D0 ||
+ self->aq_hw->aq_nic_cfg->wol)
+ if (likely(self->aq_fw_ops->set_power)) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->set_power(self->aq_hw,
+ self->power_state,
+ self->ndev->dev_addr);
+ mutex_unlock(&self->fwreq_mutex);
+ }
+}
+
+void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
 {
 struct aq_vec_s *aq_vec = NULL;
 unsigned int i = 0U;
@@ -889,15 +1283,21 @@
 if (!self)
 goto err_exit;

- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ for (i = 0U; i < self->aq_vecs; i++) {
+ aq_vec = self->aq_vec[i];
 aq_vec_deinit(aq_vec);
+ aq_vec_ring_free(aq_vec);
+ }

- if (self->power_state == AQ_HW_POWER_STATE_D0) {
- (void)self->aq_fw_ops->deinit(self->aq_hw);
- } else {
- (void)self->aq_hw_ops->hw_set_power(self->aq_hw,
- self->power_state);
+ aq_ptp_unregister(self);
+ aq_ptp_ring_deinit(self);
+ aq_ptp_ring_free(self);
+ aq_ptp_free(self);
+
+ if (likely(self->aq_fw_ops->deinit) && link_down) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->deinit(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
 }

 err_exit:;
@@ -920,42 +1320,20 @@
 err_exit:;
 }

-int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
+int aq_nic_realloc_vectors(struct aq_nic_s *self)
 {
- int err = 0;
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);

- if (!netif_running(self->ndev)) {
- err = 0;
- goto out;
- }
- rtnl_lock();
- if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
- self->power_state = AQ_HW_POWER_STATE_D3;
- netif_device_detach(self->ndev);
- netif_tx_stop_all_queues(self->ndev);
+ aq_nic_free_vectors(self);

- err = aq_nic_stop(self);
- if (err < 0)
- goto err_exit;
-
- aq_nic_deinit(self);
- } else {
- err = aq_nic_init(self);
- if (err < 0)
- goto err_exit;
-
- err = aq_nic_start(self);
- if (err < 0)
- goto err_exit;
-
- netif_device_attach(self->ndev);
- netif_tx_start_all_queues(self->ndev);
+ for (self->aq_vecs = 0; self->aq_vecs < cfg->vecs; self->aq_vecs++) {
+ self->aq_vec[self->aq_vecs] = aq_vec_alloc(self, self->aq_vecs,
+ cfg);
+ if (unlikely(!self->aq_vec[self->aq_vecs]))
+ return -ENOMEM;
 }

-err_exit:
- rtnl_unlock();
-out:
- return err;
+ return 0;
 }

 void aq_nic_shutdown(struct aq_nic_s *self)
@@ -974,8 +1352,193 @@
 if (err < 0)
 goto err_exit;
 }
- aq_nic_deinit(self);
+ aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(self);

 err_exit:
 rtnl_unlock();
-}
+}
+
+u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
+{
+ u8 location = 0xFF;
+ u32 fltr_cnt;
+ u32 n_bit;
+
+ switch (type) {
+ case aq_rx_filter_ethertype:
+ location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
+ self->aq_hw_rx_fltrs.fet_reserved_count;
+ self->aq_hw_rx_fltrs.fet_reserved_count++;
+ break;
+ case aq_rx_filter_l3l4:
+ fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
+ n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;
+
+ self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
+ self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
+ location = n_bit;
+ break;
+ default:
+ break;
+ }
+
+ return location;
+}
+
+void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
+ u32 location)
+{
+ switch (type) {
+ case aq_rx_filter_ethertype:
+ self->aq_hw_rx_fltrs.fet_reserved_count--;
+ break;
+ case aq_rx_filter_l3l4:
+ self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
+ self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
+ break;
+ default:
+ break;
+ }
+}
+
+int aq_nic_set_downshift(struct aq_nic_s *self, int val)
+{
+ int err = 0;
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (!self->aq_fw_ops->set_downshift)
+ return -EOPNOTSUPP;
+
+ if (val > 15) {
+ netdev_err(self->ndev, "downshift counter should be <= 15\n");
+ return -EINVAL;
+ }
+ cfg->downshift_counter = val;
+
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->set_downshift(self->aq_hw, cfg->downshift_counter);
+ mutex_unlock(&self->fwreq_mutex);
+
+ return err;
+}
+
+int aq_nic_set_media_detect(struct aq_nic_s *self, int val)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ int err = 0;
+
+ if (!self->aq_fw_ops->set_media_detect)
+ return -EOPNOTSUPP;
+
+ if (val > 0 && val != AQ_HW_MEDIA_DETECT_CNT) {
+ netdev_err(self->ndev, "EDPD on this device could have only fixed value of %d\n",
+ AQ_HW_MEDIA_DETECT_CNT);
+ return -EINVAL;
+ }
+
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->set_media_detect(self->aq_hw, !!val);
+ mutex_unlock(&self->fwreq_mutex);
+
+ /* msecs plays no role - configuration is always fixed in PHY */
+ if (!err)
+ cfg->is_media_detect = !!val;
+
+ return err;
+}
+
+int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ const unsigned int prev_vecs = cfg->vecs;
+ bool ndev_running;
+ int err = 0;
+ int i;
+
+ /* if already the same configuration or
+ * disable request (tcs is 0) and we already is disabled
+ */
+ if (tcs == cfg->tcs || (tcs == 0 && !cfg->is_qos))
+ return 0;
+
+ ndev_running = netif_running(self->ndev);
+ if (ndev_running)
+ dev_close(self->ndev);
+
+ cfg->tcs = tcs;
+ if (cfg->tcs == 0)
+ cfg->tcs = 1;
+ if (prio_tc_map)
+ memcpy(cfg->prio_tc_map, prio_tc_map, sizeof(cfg->prio_tc_map));
+ else
+ for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
+ cfg->prio_tc_map[i] = cfg->tcs * i / 8;
+
+ cfg->is_qos = (tcs != 0 ? true : false);
+ cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC);
+ if (!cfg->is_ptp)
+ netdev_warn(self->ndev, "%s\n",
+ "PTP is auto disabled due to requested TC count.");
+
+ netdev_set_num_tc(self->ndev, cfg->tcs);
+
+ /* Changing the number of TCs might change the number of vectors */
+ aq_nic_cfg_update_num_vecs(self);
+ if (prev_vecs != cfg->vecs) {
+ err = aq_nic_realloc_vectors(self);
+ if (err)
+ goto err_exit;
+ }
+
+ if (ndev_running)
+ err = dev_open(self->ndev, NULL);
+
+err_exit:
+ return err;
+}
+
+int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 max_rate)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (tc >= AQ_CFG_TCS_MAX)
+ return -EINVAL;
+
+ if (max_rate && max_rate < 10) {
+ netdev_warn(self->ndev,
+ "Setting %s to the minimum usable value of %dMbps.\n",
+ "max rate", 10);
+ cfg->tc_max_rate[tc] = 10;
+ } else {
+ cfg->tc_max_rate[tc] = max_rate;
+ }
+
+ return 0;
+}
+
+int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 min_rate)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (tc >= AQ_CFG_TCS_MAX)
+ return -EINVAL;
+
+ if (min_rate)
+ set_bit(tc, &cfg->tc_min_rate_msk);
+ else
+ clear_bit(tc, &cfg->tc_min_rate_msk);
+
+ if (min_rate && min_rate < 20) {
+ netdev_warn(self->ndev,
+ "Setting %s to the minimum usable value of %dMbps.\n",
+ "min rate", 20);
+ cfg->tc_min_rate[tc] = 20;
+ } else {
+ cfg->tc_min_rate[tc] = min_rate;
+ }
+
+ return 0;
+}