forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/net/ethernet/qlogic/qed/qed_dev.c
....@@ -1,33 +1,7 @@
1
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
12 /* QLogic qed NIC Driver
23 * Copyright (c) 2015-2017 QLogic Corporation
3
- *
4
- * This software is available to you under a choice of one of two
5
- * licenses. You may choose to be licensed under the terms of the GNU
6
- * General Public License (GPL) Version 2, available from the file
7
- * COPYING in the main directory of this source tree, or the
8
- * OpenIB.org BSD license below:
9
- *
10
- * Redistribution and use in source and binary forms, with or
11
- * without modification, are permitted provided that the following
12
- * conditions are met:
13
- *
14
- * - Redistributions of source code must retain the above
15
- * copyright notice, this list of conditions and the following
16
- * disclaimer.
17
- *
18
- * - Redistributions in binary form must reproduce the above
19
- * copyright notice, this list of conditions and the following
20
- * disclaimer in the documentation and /or other materials
21
- * provided with the distribution.
22
- *
23
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30
- * SOFTWARE.
4
+ * Copyright (c) 2019-2020 Marvell International Ltd.
315 */
326
337 #include <linux/types.h>
....@@ -65,6 +39,1222 @@
6539 #include "qed_rdma.h"
6640
6741 static DEFINE_SPINLOCK(qm_lock);
42
+
43
+/******************** Doorbell Recovery *******************/
44
+/* The doorbell recovery mechanism consists of a list of entries which represent
45
+ * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
46
+ * entity needs to register with the mechanism and provide the parameters
47
+ * describing its doorbell, including a location where the last used doorbell data
48
+ * can be found. The doorbell execute function will traverse the list and
49
+ * doorbell all of the registered entries.
50
+ */
51
+struct qed_db_recovery_entry {
52
+ struct list_head list_entry;
53
+ void __iomem *db_addr;
54
+ void *db_data;
55
+ enum qed_db_rec_width db_width;
56
+ enum qed_db_rec_space db_space;
57
+ u8 hwfn_idx;
58
+};
59
+
60
+/* Display a single doorbell recovery entry */
61
+static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
62
+ struct qed_db_recovery_entry *db_entry,
63
+ char *action)
64
+{
65
+ DP_VERBOSE(p_hwfn,
66
+ QED_MSG_SPQ,
67
+ "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
68
+ action,
69
+ db_entry,
70
+ db_entry->db_addr,
71
+ db_entry->db_data,
72
+ db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
73
+ db_entry->db_space == DB_REC_USER ? "user" : "kernel",
74
+ db_entry->hwfn_idx);
75
+}
76
+
77
+/* Doorbell address sanity (address within doorbell bar range) */
78
+static bool qed_db_rec_sanity(struct qed_dev *cdev,
79
+ void __iomem *db_addr,
80
+ enum qed_db_rec_width db_width,
81
+ void *db_data)
82
+{
83
+ u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
84
+
85
+ /* Make sure doorbell address is within the doorbell bar */
86
+ if (db_addr < cdev->doorbells ||
87
+ (u8 __iomem *)db_addr + width >
88
+ (u8 __iomem *)cdev->doorbells + cdev->db_size) {
89
+ WARN(true,
90
+ "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
91
+ db_addr,
92
+ cdev->doorbells,
93
+ (u8 __iomem *)cdev->doorbells + cdev->db_size);
94
+ return false;
95
+ }
96
+
97
+ /* Make sure doorbell data pointer is not null */
98
+ if (!db_data) {
99
+ WARN(true, "Illegal doorbell data pointer: %p", db_data);
100
+ return false;
101
+ }
102
+
103
+ return true;
104
+}
105
+
106
+/* Find hwfn according to the doorbell address */
107
+static struct qed_hwfn *qed_db_rec_find_hwfn(struct qed_dev *cdev,
108
+ void __iomem *db_addr)
109
+{
110
+ struct qed_hwfn *p_hwfn;
111
+
112
+ /* In CMT the doorbell bar is split down the middle between engine 0 and engine 1 */
113
+ if (cdev->num_hwfns > 1)
114
+ p_hwfn = db_addr < cdev->hwfns[1].doorbells ?
115
+ &cdev->hwfns[0] : &cdev->hwfns[1];
116
+ else
117
+ p_hwfn = QED_LEADING_HWFN(cdev);
118
+
119
+ return p_hwfn;
120
+}
121
+
122
+/* Add a new entry to the doorbell recovery mechanism */
123
+int qed_db_recovery_add(struct qed_dev *cdev,
124
+ void __iomem *db_addr,
125
+ void *db_data,
126
+ enum qed_db_rec_width db_width,
127
+ enum qed_db_rec_space db_space)
128
+{
129
+ struct qed_db_recovery_entry *db_entry;
130
+ struct qed_hwfn *p_hwfn;
131
+
132
+ /* Shortcircuit VFs, for now */
133
+ if (IS_VF(cdev)) {
134
+ DP_VERBOSE(cdev,
135
+ QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
136
+ return 0;
137
+ }
138
+
139
+ /* Sanitize doorbell address */
140
+ if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
141
+ return -EINVAL;
142
+
143
+ /* Obtain hwfn from doorbell address */
144
+ p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
145
+
146
+ /* Create entry */
147
+ db_entry = kzalloc(sizeof(*db_entry), GFP_KERNEL);
148
+ if (!db_entry) {
149
+ DP_NOTICE(cdev, "Failed to allocate a db recovery entry\n");
150
+ return -ENOMEM;
151
+ }
152
+
153
+ /* Populate entry */
154
+ db_entry->db_addr = db_addr;
155
+ db_entry->db_data = db_data;
156
+ db_entry->db_width = db_width;
157
+ db_entry->db_space = db_space;
158
+ db_entry->hwfn_idx = p_hwfn->my_id;
159
+
160
+ /* Display */
161
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");
162
+
163
+ /* Protect the list */
164
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
165
+ list_add_tail(&db_entry->list_entry, &p_hwfn->db_recovery_info.list);
166
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
167
+
168
+ return 0;
169
+}
170
+
171
+/* Remove an entry from the doorbell recovery mechanism */
172
+int qed_db_recovery_del(struct qed_dev *cdev,
173
+ void __iomem *db_addr, void *db_data)
174
+{
175
+ struct qed_db_recovery_entry *db_entry = NULL;
176
+ struct qed_hwfn *p_hwfn;
177
+ int rc = -EINVAL;
178
+
179
+ /* Shortcircuit VFs, for now */
180
+ if (IS_VF(cdev)) {
181
+ DP_VERBOSE(cdev,
182
+ QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
183
+ return 0;
184
+ }
185
+
186
+ /* Obtain hwfn from doorbell address */
187
+ p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
188
+
189
+ /* Protect the list */
190
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
191
+ list_for_each_entry(db_entry,
192
+ &p_hwfn->db_recovery_info.list, list_entry) {
193
+ /* search according to db_data addr since db_addr is not unique (roce) */
194
+ if (db_entry->db_data == db_data) {
195
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting");
196
+ list_del(&db_entry->list_entry);
197
+ rc = 0;
198
+ break;
199
+ }
200
+ }
201
+
202
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
203
+
204
+ if (rc == -EINVAL)
205
206
+ DP_NOTICE(p_hwfn,
207
+ "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
208
+ db_data, db_addr);
209
+ else
210
+ kfree(db_entry);
211
+
212
+ return rc;
213
+}
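A minimal usage sketch of the add/del pair above (hypothetical caller; DB_REC_WIDTH_64B and DB_REC_KERNEL are assumed counterparts of the enum values this file tests explicitly):

static int example_queue_start(struct qed_dev *cdev,
			       void __iomem *my_db_addr, u64 *my_db_data)
{
	/* Register on creation so recovery can re-ring *my_db_data */
	return qed_db_recovery_add(cdev, my_db_addr, my_db_data,
				   DB_REC_WIDTH_64B, DB_REC_KERNEL);
}

static void example_queue_stop(struct qed_dev *cdev,
			       void __iomem *my_db_addr, u64 *my_db_data)
{
	/* Unregister on teardown; the lookup is keyed by my_db_data */
	qed_db_recovery_del(cdev, my_db_addr, my_db_data);
}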
214
+
215
+/* Initialize the doorbell recovery mechanism */
216
+static int qed_db_recovery_setup(struct qed_hwfn *p_hwfn)
217
+{
218
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting up db recovery\n");
219
+
220
+ /* Make sure db_size was set in cdev */
221
+ if (!p_hwfn->cdev->db_size) {
222
+ DP_ERR(p_hwfn->cdev, "db_size not set\n");
223
+ return -EINVAL;
224
+ }
225
+
226
+ INIT_LIST_HEAD(&p_hwfn->db_recovery_info.list);
227
+ spin_lock_init(&p_hwfn->db_recovery_info.lock);
228
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
229
+
230
+ return 0;
231
+}
232
+
233
+/* Destroy the doorbell recovery mechanism */
234
+static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn)
235
+{
236
+ struct qed_db_recovery_entry *db_entry = NULL;
237
+
238
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Tearing down db recovery\n");
239
+ if (!list_empty(&p_hwfn->db_recovery_info.list)) {
240
+ DP_VERBOSE(p_hwfn,
241
+ QED_MSG_SPQ,
242
+ "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
243
+ while (!list_empty(&p_hwfn->db_recovery_info.list)) {
244
+ db_entry =
245
+ list_first_entry(&p_hwfn->db_recovery_info.list,
246
+ struct qed_db_recovery_entry,
247
+ list_entry);
248
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
249
+ list_del(&db_entry->list_entry);
250
+ kfree(db_entry);
251
+ }
252
+ }
253
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
254
+}
255
+
256
+/* Print the content of the doorbell recovery mechanism */
257
+void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
258
+{
259
+ struct qed_db_recovery_entry *db_entry = NULL;
260
+
261
+ DP_NOTICE(p_hwfn,
262
+ "Displaying doorbell recovery database. Counter was %d\n",
263
+ p_hwfn->db_recovery_info.db_recovery_counter);
264
+
265
+ /* Protect the list */
266
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
267
+ list_for_each_entry(db_entry,
268
+ &p_hwfn->db_recovery_info.list, list_entry) {
269
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
270
+ }
271
+
272
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
273
+}
274
+
275
+/* Ring the doorbell of a single doorbell recovery entry */
276
+static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
277
+ struct qed_db_recovery_entry *db_entry)
278
+{
279
+ /* Print according to width */
280
+ if (db_entry->db_width == DB_REC_WIDTH_32B) {
281
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
282
+ "ringing doorbell address %p data %x\n",
283
+ db_entry->db_addr,
284
+ *(u32 *)db_entry->db_data);
285
+ } else {
286
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
287
+ "ringing doorbell address %p data %llx\n",
288
+ db_entry->db_addr,
289
+ *(u64 *)(db_entry->db_data));
290
+ }
291
+
292
+ /* Sanity */
293
+ if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
294
+ db_entry->db_width, db_entry->db_data))
295
+ return;
296
+
297
+ /* Flush the write combined buffer. Since there are multiple doorbelling
298
+ * entities using the same address, if we don't flush, a transaction
299
+ * could be lost.
300
+ */
301
+ wmb();
302
+
303
+ /* Ring the doorbell */
304
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
305
+ DIRECT_REG_WR(db_entry->db_addr,
306
+ *(u32 *)(db_entry->db_data));
307
+ else
308
+ DIRECT_REG_WR64(db_entry->db_addr,
309
+ *(u64 *)(db_entry->db_data));
310
+
311
+ /* Flush the write combined buffer. Next doorbell may come from a
312
+ * different entity to the same address...
313
+ */
314
+ wmb();
315
+}
316
+
317
+/* Traverse the doorbell recovery entry list and ring all the doorbells */
318
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
319
+{
320
+ struct qed_db_recovery_entry *db_entry = NULL;
321
+
322
+ DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
323
+ p_hwfn->db_recovery_info.db_recovery_counter);
324
+
325
+ /* Track amount of times recovery was executed */
326
+ p_hwfn->db_recovery_info.db_recovery_counter++;
327
+
328
+ /* Protect the list */
329
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
330
+ list_for_each_entry(db_entry,
331
+ &p_hwfn->db_recovery_info.list, list_entry)
332
+ qed_db_recovery_ring(p_hwfn, db_entry);
333
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
334
+}
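When a doorbell drop has been detected and the overflow condition cleared, the expected recovery step is simply the traversal above; a one-line sketch, assuming the caller already holds a valid p_hwfn:

	/* Replay every registered doorbell with its last written data */
	qed_db_recovery_execute(p_hwfn);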
335
+
336
+/******************** Doorbell Recovery end ****************/
337
+
338
+/********************************** NIG LLH ***********************************/
339
+
340
+enum qed_llh_filter_type {
341
+ QED_LLH_FILTER_TYPE_MAC,
342
+ QED_LLH_FILTER_TYPE_PROTOCOL,
343
+};
344
+
345
+struct qed_llh_mac_filter {
346
+ u8 addr[ETH_ALEN];
347
+};
348
+
349
+struct qed_llh_protocol_filter {
350
+ enum qed_llh_prot_filter_type_t type;
351
+ u16 source_port_or_eth_type;
352
+ u16 dest_port;
353
+};
354
+
355
+union qed_llh_filter {
356
+ struct qed_llh_mac_filter mac;
357
+ struct qed_llh_protocol_filter protocol;
358
+};
359
+
360
+struct qed_llh_filter_info {
361
+ bool b_enabled;
362
+ u32 ref_cnt;
363
+ enum qed_llh_filter_type type;
364
+ union qed_llh_filter filter;
365
+};
366
+
367
+struct qed_llh_info {
368
+ /* Number of LLH filters banks */
369
+ u8 num_ppfid;
370
+
371
+#define MAX_NUM_PPFID 8
372
+ u8 ppfid_array[MAX_NUM_PPFID];
373
+
374
+ /* Array of filters arrays:
375
+ * "num_ppfid" elements of filters banks, where each is an array of
376
+ * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters.
377
+ */
378
+ struct qed_llh_filter_info **pp_filters;
379
+};
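A sketch of how the two-level shadow described above is indexed (helper name invented for illustration):

static struct qed_llh_filter_info *
example_get_filter(struct qed_dev *cdev, u8 ppfid, u8 filter_idx)
{
	/* pp_filters holds num_ppfid banks, each an array of
	 * NIG_REG_LLH_FUNC_FILTER_EN_SIZE filter entries
	 */
	return &cdev->p_llh_info->pp_filters[ppfid][filter_idx];
}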
380
+
381
+static void qed_llh_free(struct qed_dev *cdev)
382
+{
383
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
384
+ u32 i;
385
+
386
+ if (p_llh_info) {
387
+ if (p_llh_info->pp_filters)
388
+ for (i = 0; i < p_llh_info->num_ppfid; i++)
389
+ kfree(p_llh_info->pp_filters[i]);
390
+
391
+ kfree(p_llh_info->pp_filters);
392
+ }
393
+
394
+ kfree(p_llh_info);
395
+ cdev->p_llh_info = NULL;
396
+}
397
+
398
+static int qed_llh_alloc(struct qed_dev *cdev)
399
+{
400
+ struct qed_llh_info *p_llh_info;
401
+ u32 size, i;
402
+
403
+ p_llh_info = kzalloc(sizeof(*p_llh_info), GFP_KERNEL);
404
+ if (!p_llh_info)
405
+ return -ENOMEM;
406
+ cdev->p_llh_info = p_llh_info;
407
+
408
+ for (i = 0; i < MAX_NUM_PPFID; i++) {
409
+ if (!(cdev->ppfid_bitmap & (0x1 << i)))
410
+ continue;
411
+
412
+ p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
413
+ DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %hhd\n",
414
+ p_llh_info->num_ppfid, i);
415
+ p_llh_info->num_ppfid++;
416
+ }
417
+
418
+ size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters);
419
+ p_llh_info->pp_filters = kzalloc(size, GFP_KERNEL);
420
+ if (!p_llh_info->pp_filters)
421
+ return -ENOMEM;
422
+
423
+ size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
424
+ sizeof(**p_llh_info->pp_filters);
425
+ for (i = 0; i < p_llh_info->num_ppfid; i++) {
426
+ p_llh_info->pp_filters[i] = kzalloc(size, GFP_KERNEL);
427
+ if (!p_llh_info->pp_filters[i])
428
+ return -ENOMEM;
429
+ }
430
+
431
+ return 0;
432
+}
433
+
434
+static int qed_llh_shadow_sanity(struct qed_dev *cdev,
435
+ u8 ppfid, u8 filter_idx, const char *action)
436
+{
437
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
438
+
439
+ if (ppfid >= p_llh_info->num_ppfid) {
440
+ DP_NOTICE(cdev,
441
+ "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n",
442
+ action, ppfid, p_llh_info->num_ppfid);
443
+ return -EINVAL;
444
+ }
445
+
446
+ if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
447
+ DP_NOTICE(cdev,
448
+ "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n",
449
+ action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE);
450
+ return -EINVAL;
451
+ }
452
+
453
+ return 0;
454
+}
455
+
456
+#define QED_LLH_INVALID_FILTER_IDX 0xff
457
+
458
+static int
459
+qed_llh_shadow_search_filter(struct qed_dev *cdev,
460
+ u8 ppfid,
461
+ union qed_llh_filter *p_filter, u8 *p_filter_idx)
462
+{
463
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
464
+ struct qed_llh_filter_info *p_filters;
465
+ int rc;
466
+ u8 i;
467
+
468
+ rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "search");
469
+ if (rc)
470
+ return rc;
471
+
472
+ *p_filter_idx = QED_LLH_INVALID_FILTER_IDX;
473
+
474
+ p_filters = p_llh_info->pp_filters[ppfid];
475
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
476
+ if (!memcmp(p_filter, &p_filters[i].filter,
477
+ sizeof(*p_filter))) {
478
+ *p_filter_idx = i;
479
+ break;
480
+ }
481
+ }
482
+
483
+ return 0;
484
+}
485
+
486
+static int
487
+qed_llh_shadow_get_free_idx(struct qed_dev *cdev, u8 ppfid, u8 *p_filter_idx)
488
+{
489
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
490
+ struct qed_llh_filter_info *p_filters;
491
+ int rc;
492
+ u8 i;
493
+
494
+ rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "get_free_idx");
495
+ if (rc)
496
+ return rc;
497
+
498
+ *p_filter_idx = QED_LLH_INVALID_FILTER_IDX;
499
+
500
+ p_filters = p_llh_info->pp_filters[ppfid];
501
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
502
+ if (!p_filters[i].b_enabled) {
503
+ *p_filter_idx = i;
504
+ break;
505
+ }
506
+ }
507
+
508
+ return 0;
509
+}
510
+
511
+static int
512
+__qed_llh_shadow_add_filter(struct qed_dev *cdev,
513
+ u8 ppfid,
514
+ u8 filter_idx,
515
+ enum qed_llh_filter_type type,
516
+ union qed_llh_filter *p_filter, u32 *p_ref_cnt)
517
+{
518
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
519
+ struct qed_llh_filter_info *p_filters;
520
+ int rc;
521
+
522
+ rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "add");
523
+ if (rc)
524
+ return rc;
525
+
526
+ p_filters = p_llh_info->pp_filters[ppfid];
527
+ if (!p_filters[filter_idx].ref_cnt) {
528
+ p_filters[filter_idx].b_enabled = true;
529
+ p_filters[filter_idx].type = type;
530
+ memcpy(&p_filters[filter_idx].filter, p_filter,
531
+ sizeof(p_filters[filter_idx].filter));
532
+ }
533
+
534
+ *p_ref_cnt = ++p_filters[filter_idx].ref_cnt;
535
+
536
+ return 0;
537
+}
538
+
539
+static int
540
+qed_llh_shadow_add_filter(struct qed_dev *cdev,
541
+ u8 ppfid,
542
+ enum qed_llh_filter_type type,
543
+ union qed_llh_filter *p_filter,
544
+ u8 *p_filter_idx, u32 *p_ref_cnt)
545
+{
546
+ int rc;
547
+
548
+ /* Check if the same filter already exists */
549
+ rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
550
+ if (rc)
551
+ return rc;
552
+
553
+ /* Find a new entry in case of a new filter */
554
+ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
555
+ rc = qed_llh_shadow_get_free_idx(cdev, ppfid, p_filter_idx);
556
+ if (rc)
557
+ return rc;
558
+ }
559
+
560
+ /* No free entry was found */
561
+ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
562
+ DP_NOTICE(cdev,
563
+ "Failed to find an empty LLH filter to utilize [ppfid %d]\n",
564
+ ppfid);
565
+ return -EINVAL;
566
+ }
567
+
568
+ return __qed_llh_shadow_add_filter(cdev, ppfid, *p_filter_idx, type,
569
+ p_filter, p_ref_cnt);
570
+}
571
+
572
+static int
573
+__qed_llh_shadow_remove_filter(struct qed_dev *cdev,
574
+ u8 ppfid, u8 filter_idx, u32 *p_ref_cnt)
575
+{
576
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
577
+ struct qed_llh_filter_info *p_filters;
578
+ int rc;
579
+
580
+ rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "remove");
581
+ if (rc)
582
+ return rc;
583
+
584
+ p_filters = p_llh_info->pp_filters[ppfid];
585
+ if (!p_filters[filter_idx].ref_cnt) {
586
+ DP_NOTICE(cdev,
587
+ "LLH shadow: trying to remove a filter with ref_cnt=0\n");
588
+ return -EINVAL;
589
+ }
590
+
591
+ *p_ref_cnt = --p_filters[filter_idx].ref_cnt;
592
+ if (!p_filters[filter_idx].ref_cnt)
593
+ memset(&p_filters[filter_idx],
594
+ 0, sizeof(p_filters[filter_idx]));
595
+
596
+ return 0;
597
+}
598
+
599
+static int
600
+qed_llh_shadow_remove_filter(struct qed_dev *cdev,
601
+ u8 ppfid,
602
+ union qed_llh_filter *p_filter,
603
+ u8 *p_filter_idx, u32 *p_ref_cnt)
604
+{
605
+ int rc;
606
+
607
+ rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
608
+ if (rc)
609
+ return rc;
610
+
611
+ /* No matching filter was found */
612
+ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
613
+ DP_NOTICE(cdev, "Failed to find a filter in the LLH shadow\n");
614
+ return -EINVAL;
615
+ }
616
+
617
+ return __qed_llh_shadow_remove_filter(cdev, ppfid, *p_filter_idx,
618
+ p_ref_cnt);
619
+}
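The reference counting above means the hardware is touched only on the first add and the last remove; an illustrative sequence (assuming a `filter` populated as in the callers below):

	u8 idx;
	u32 ref;

	qed_llh_shadow_add_filter(cdev, 0, QED_LLH_FILTER_TYPE_MAC,
				  &filter, &idx, &ref);	/* ref == 1 */
	qed_llh_shadow_add_filter(cdev, 0, QED_LLH_FILTER_TYPE_MAC,
				  &filter, &idx, &ref);	/* ref == 2 */
	qed_llh_shadow_remove_filter(cdev, 0, &filter, &idx, &ref); /* ref == 1 */
	qed_llh_shadow_remove_filter(cdev, 0, &filter, &idx, &ref); /* ref == 0 */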
620
+
621
+static int qed_llh_abs_ppfid(struct qed_dev *cdev, u8 ppfid, u8 *p_abs_ppfid)
622
+{
623
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
624
+
625
+ if (ppfid >= p_llh_info->num_ppfid) {
626
+ DP_NOTICE(cdev,
627
+ "ppfid %d is not valid, available indices are 0..%hhd\n",
628
+ ppfid, p_llh_info->num_ppfid - 1);
629
+ *p_abs_ppfid = 0;
630
+ return -EINVAL;
631
+ }
632
+
633
+ *p_abs_ppfid = p_llh_info->ppfid_array[ppfid];
634
+
635
+ return 0;
636
+}
637
+
638
+static int
639
+qed_llh_set_engine_affin(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
640
+{
641
+ struct qed_dev *cdev = p_hwfn->cdev;
642
+ enum qed_eng eng;
643
+ u8 ppfid;
644
+ int rc;
645
+
646
+ rc = qed_mcp_get_engine_config(p_hwfn, p_ptt);
647
+ if (rc != 0 && rc != -EOPNOTSUPP) {
648
+ DP_NOTICE(p_hwfn,
649
+ "Failed to get the engine affinity configuration\n");
650
+ return rc;
651
+ }
652
+
653
+ /* RoCE PF is bound to a single engine */
654
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
655
+ eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
656
+ rc = qed_llh_set_roce_affinity(cdev, eng);
657
+ if (rc) {
658
+ DP_NOTICE(cdev,
659
+ "Failed to set the RoCE engine affinity\n");
660
+ return rc;
661
+ }
662
+
663
+ DP_VERBOSE(cdev,
664
+ QED_MSG_SP,
665
+ "LLH: Set the engine affinity of RoCE packets as %d\n",
666
+ eng);
667
+ }
668
+
669
+ /* Storage PF is bound to a single engine while L2 PF uses both */
670
+ if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn))
671
+ eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
672
+ else /* L2_PERSONALITY */
673
+ eng = QED_BOTH_ENG;
674
+
675
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
676
+ rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
677
+ if (rc) {
678
+ DP_NOTICE(cdev,
679
+ "Failed to set the engine affinity of ppfid %d\n",
680
+ ppfid);
681
+ return rc;
682
+ }
683
+ }
684
+
685
+ DP_VERBOSE(cdev, QED_MSG_SP,
686
+ "LLH: Set the engine affinity of non-RoCE packets as %d\n",
687
+ eng);
688
+
689
+ return 0;
690
+}
691
+
692
+static int qed_llh_hw_init_pf(struct qed_hwfn *p_hwfn,
693
+ struct qed_ptt *p_ptt)
694
+{
695
+ struct qed_dev *cdev = p_hwfn->cdev;
696
+ u8 ppfid, abs_ppfid;
697
+ int rc;
698
+
699
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
700
+ u32 addr;
701
+
702
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
703
+ if (rc)
704
+ return rc;
705
+
706
+ addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
707
+ qed_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
708
+ }
709
+
710
+ if (test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) &&
711
+ !QED_IS_FCOE_PERSONALITY(p_hwfn)) {
712
+ rc = qed_llh_add_mac_filter(cdev, 0,
713
+ p_hwfn->hw_info.hw_mac_addr);
714
+ if (rc)
715
+ DP_NOTICE(cdev,
716
+ "Failed to add an LLH filter with the primary MAC\n");
717
+ }
718
+
719
+ if (QED_IS_CMT(cdev)) {
720
+ rc = qed_llh_set_engine_affin(p_hwfn, p_ptt);
721
+ if (rc)
722
+ return rc;
723
+ }
724
+
725
+ return 0;
726
+}
727
+
728
+u8 qed_llh_get_num_ppfid(struct qed_dev *cdev)
729
+{
730
+ return cdev->p_llh_info->num_ppfid;
731
+}
732
+
733
+#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK 0x3
734
+#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT 0
735
+#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK 0x3
736
+#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT 2
737
+
738
+int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng)
739
+{
740
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
741
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
742
+ u32 addr, val, eng_sel;
743
+ u8 abs_ppfid;
744
+ int rc = 0;
745
+
746
+ if (!p_ptt)
747
+ return -EAGAIN;
748
+
749
+ if (!QED_IS_CMT(cdev))
750
+ goto out;
751
+
752
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
753
+ if (rc)
754
+ goto out;
755
+
756
+ switch (eng) {
757
+ case QED_ENG0:
758
+ eng_sel = 0;
759
+ break;
760
+ case QED_ENG1:
761
+ eng_sel = 1;
762
+ break;
763
+ case QED_BOTH_ENG:
764
+ eng_sel = 2;
765
+ break;
766
+ default:
767
+ DP_NOTICE(cdev, "Invalid affinity value for ppfid [%d]\n", eng);
768
+ rc = -EINVAL;
769
+ goto out;
770
+ }
771
+
772
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
773
+ val = qed_rd(p_hwfn, p_ptt, addr);
774
+ SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel);
775
+ qed_wr(p_hwfn, p_ptt, addr, val);
776
+
777
+ /* The iWARP affinity is set as the affinity of ppfid 0 */
778
+ if (!ppfid && QED_IS_IWARP_PERSONALITY(p_hwfn))
779
+ cdev->iwarp_affin = (eng == QED_ENG1) ? 1 : 0;
780
+out:
781
+ qed_ptt_release(p_hwfn, p_ptt);
782
+
783
+ return rc;
784
+}
785
+
786
+int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng)
787
+{
788
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
789
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
790
+ u32 addr, val, eng_sel;
791
+ u8 ppfid, abs_ppfid;
792
+ int rc = 0;
793
+
794
+ if (!p_ptt)
795
+ return -EAGAIN;
796
+
797
+ if (!QED_IS_CMT(cdev))
798
+ goto out;
799
+
800
+ switch (eng) {
801
+ case QED_ENG0:
802
+ eng_sel = 0;
803
+ break;
804
+ case QED_ENG1:
805
+ eng_sel = 1;
806
+ break;
807
+ case QED_BOTH_ENG:
808
+ eng_sel = 2;
809
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL,
810
+ 0xf); /* QP bit 15 */
811
+ break;
812
+ default:
813
+ DP_NOTICE(cdev, "Invalid affinity value for RoCE [%d]\n", eng);
814
+ rc = -EINVAL;
815
+ goto out;
816
+ }
817
+
818
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
819
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
820
+ if (rc)
821
+ goto out;
822
+
823
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
824
+ val = qed_rd(p_hwfn, p_ptt, addr);
825
+ SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel);
826
+ qed_wr(p_hwfn, p_ptt, addr, val);
827
+ }
828
+out:
829
+ qed_ptt_release(p_hwfn, p_ptt);
830
+
831
+ return rc;
832
+}
833
+
834
+struct qed_llh_filter_details {
835
+ u64 value;
836
+ u32 mode;
837
+ u32 protocol_type;
838
+ u32 hdr_sel;
839
+ u32 enable;
840
+};
841
+
842
+static int
843
+qed_llh_access_filter(struct qed_hwfn *p_hwfn,
844
+ struct qed_ptt *p_ptt,
845
+ u8 abs_ppfid,
846
+ u8 filter_idx,
847
+ struct qed_llh_filter_details *p_details)
848
+{
849
+ struct qed_dmae_params params = {0};
850
+ u32 addr;
851
+ u8 pfid;
852
+ int rc;
853
+
854
+ /* The NIG/LLH registers that are accessed in this function have only 16
855
+ * rows which are exposed to a PF. I.e. only the 16 filters of its
856
+ * default ppfid. Accessing filters of other ppfids requires pretending
857
+ * to other PFs.
858
+ * The calculation of PPFID->PFID in AH is based on the relative index
859
+ * of a PF on its port.
860
+ * For BB the pfid is actually the abs_ppfid.
861
+ */
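+ /* Worked example (illustrative values): on AH with two ports per
+ * engine, abs_ppfid 3 on port 1 maps to pfid = 3 * 2 + 1 = 7, while
+ * on BB the same abs_ppfid would be used as the pfid directly.
+ */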
862
+ if (QED_IS_BB(p_hwfn->cdev))
863
+ pfid = abs_ppfid;
864
+ else
865
+ pfid = abs_ppfid * p_hwfn->cdev->num_ports_in_engine +
866
+ MFW_PORT(p_hwfn);
867
+
868
+ /* Filter enable - should be done first when removing a filter */
869
+ if (!p_details->enable) {
870
+ qed_fid_pretend(p_hwfn, p_ptt,
871
+ pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
872
+
873
+ addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
874
+ qed_wr(p_hwfn, p_ptt, addr, p_details->enable);
875
+
876
+ qed_fid_pretend(p_hwfn, p_ptt,
877
+ p_hwfn->rel_pf_id <<
878
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
879
+ }
880
+
881
+ /* Filter value */
882
+ addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;
883
+
884
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1);
885
+ params.dst_pfid = pfid;
886
+ rc = qed_dmae_host2grc(p_hwfn,
887
+ p_ptt,
888
+ (u64)(uintptr_t)&p_details->value,
889
+ addr, 2 /* size_in_dwords */,
890
+ &params);
891
+ if (rc)
892
+ return rc;
893
+
894
+ qed_fid_pretend(p_hwfn, p_ptt,
895
+ pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
896
+
897
+ /* Filter mode */
898
+ addr = NIG_REG_LLH_FUNC_FILTER_MODE + filter_idx * 0x4;
899
+ qed_wr(p_hwfn, p_ptt, addr, p_details->mode);
900
+
901
+ /* Filter protocol type */
902
+ addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + filter_idx * 0x4;
903
+ qed_wr(p_hwfn, p_ptt, addr, p_details->protocol_type);
904
+
905
+ /* Filter header select */
906
+ addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL + filter_idx * 0x4;
907
+ qed_wr(p_hwfn, p_ptt, addr, p_details->hdr_sel);
908
+
909
+ /* Filter enable - should be done last when adding a filter */
910
+ if (p_details->enable) {
911
+ addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
912
+ qed_wr(p_hwfn, p_ptt, addr, p_details->enable);
913
+ }
914
+
915
+ qed_fid_pretend(p_hwfn, p_ptt,
916
+ p_hwfn->rel_pf_id <<
917
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
918
+
919
+ return 0;
920
+}
921
+
922
+static int
923
+qed_llh_add_filter(struct qed_hwfn *p_hwfn,
924
+ struct qed_ptt *p_ptt,
925
+ u8 abs_ppfid,
926
+ u8 filter_idx, u8 filter_prot_type, u32 high, u32 low)
927
+{
928
+ struct qed_llh_filter_details filter_details;
929
+
930
+ filter_details.enable = 1;
931
+ filter_details.value = ((u64)high << 32) | low;
932
+ filter_details.hdr_sel = 0;
933
+ filter_details.protocol_type = filter_prot_type;
934
+ /* Mode: 0 - MAC-address classification, 1 - protocol classification */
935
+ filter_details.mode = filter_prot_type ? 1 : 0;
936
+
937
+ return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
938
+ &filter_details);
939
+}
940
+
941
+static int
942
+qed_llh_remove_filter(struct qed_hwfn *p_hwfn,
943
+ struct qed_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
944
+{
945
+ struct qed_llh_filter_details filter_details = {0};
946
+
947
+ return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
948
+ &filter_details);
949
+}
950
+
951
+int qed_llh_add_mac_filter(struct qed_dev *cdev,
952
+ u8 ppfid, u8 mac_addr[ETH_ALEN])
953
+{
954
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
955
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
956
+ union qed_llh_filter filter = {};
957
+ u8 filter_idx, abs_ppfid = 0;
958
+ u32 high, low, ref_cnt;
959
+ int rc = 0;
960
+
961
+ if (!p_ptt)
962
+ return -EAGAIN;
963
+
964
+ if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
965
+ goto out;
966
+
967
+ memcpy(filter.mac.addr, mac_addr, ETH_ALEN);
968
+ rc = qed_llh_shadow_add_filter(cdev, ppfid,
969
+ QED_LLH_FILTER_TYPE_MAC,
970
+ &filter, &filter_idx, &ref_cnt);
971
+ if (rc)
972
+ goto err;
973
+
974
+ /* Configure the LLH only in case of a new filter */
975
+ if (ref_cnt == 1) {
976
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
977
+ if (rc)
978
+ goto err;
979
+
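+ /* Illustrative packing example: for MAC 00:11:22:33:44:55 the
+ * computation below yields high = 0x0011 and low = 0x22334455,
+ * i.e. the first two octets land in "high" and the last four in
+ * "low", most significant octet first.
+ */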
980
+ high = mac_addr[1] | (mac_addr[0] << 8);
981
+ low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) |
982
+ (mac_addr[2] << 24);
983
+ rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
984
+ 0, high, low);
985
+ if (rc)
986
+ goto err;
987
+ }
988
+
989
+ DP_VERBOSE(cdev,
990
+ QED_MSG_SP,
991
+ "LLH: Added MAC filter [%pM] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
992
+ mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);
993
+
994
+ goto out;
995
+
996
+err: DP_NOTICE(cdev,
997
+ "LLH: Failed to add MAC filter [%pM] to ppfid %hhd\n",
998
+ mac_addr, ppfid);
999
+out:
1000
+ qed_ptt_release(p_hwfn, p_ptt);
1001
+
1002
+ return rc;
1003
+}
1004
+
1005
+static int
1006
+qed_llh_protocol_filter_stringify(struct qed_dev *cdev,
1007
+ enum qed_llh_prot_filter_type_t type,
1008
+ u16 source_port_or_eth_type,
1009
+ u16 dest_port, u8 *str, size_t str_len)
1010
+{
1011
+ switch (type) {
1012
+ case QED_LLH_FILTER_ETHERTYPE:
1013
+ snprintf(str, str_len, "Ethertype 0x%04x",
1014
+ source_port_or_eth_type);
1015
+ break;
1016
+ case QED_LLH_FILTER_TCP_SRC_PORT:
1017
+ snprintf(str, str_len, "TCP src port 0x%04x",
1018
+ source_port_or_eth_type);
1019
+ break;
1020
+ case QED_LLH_FILTER_UDP_SRC_PORT:
1021
+ snprintf(str, str_len, "UDP src port 0x%04x",
1022
+ source_port_or_eth_type);
1023
+ break;
1024
+ case QED_LLH_FILTER_TCP_DEST_PORT:
1025
+ snprintf(str, str_len, "TCP dst port 0x%04x", dest_port);
1026
+ break;
1027
+ case QED_LLH_FILTER_UDP_DEST_PORT:
1028
+ snprintf(str, str_len, "UDP dst port 0x%04x", dest_port);
1029
+ break;
1030
+ case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
1031
+ snprintf(str, str_len, "TCP src/dst ports 0x%04x/0x%04x",
1032
+ source_port_or_eth_type, dest_port);
1033
+ break;
1034
+ case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
1035
+ snprintf(str, str_len, "UDP src/dst ports 0x%04x/0x%04x",
1036
+ source_port_or_eth_type, dest_port);
1037
+ break;
1038
+ default:
1039
+ DP_NOTICE(cdev,
1040
+ "Non valid LLH protocol filter type %d\n", type);
1041
+ return -EINVAL;
1042
+ }
1043
+
1044
+ return 0;
1045
+}
1046
+
1047
+static int
1048
+qed_llh_protocol_filter_to_hilo(struct qed_dev *cdev,
1049
+ enum qed_llh_prot_filter_type_t type,
1050
+ u16 source_port_or_eth_type,
1051
+ u16 dest_port, u32 *p_high, u32 *p_low)
1052
+{
1053
+ *p_high = 0;
1054
+ *p_low = 0;
1055
+
1056
+ switch (type) {
1057
+ case QED_LLH_FILTER_ETHERTYPE:
1058
+ *p_high = source_port_or_eth_type;
1059
+ break;
1060
+ case QED_LLH_FILTER_TCP_SRC_PORT:
1061
+ case QED_LLH_FILTER_UDP_SRC_PORT:
1062
+ *p_low = source_port_or_eth_type << 16;
1063
+ break;
1064
+ case QED_LLH_FILTER_TCP_DEST_PORT:
1065
+ case QED_LLH_FILTER_UDP_DEST_PORT:
1066
+ *p_low = dest_port;
1067
+ break;
1068
+ case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
1069
+ case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
1070
+ *p_low = (source_port_or_eth_type << 16) | dest_port;
1071
+ break;
1072
+ default:
1073
+ DP_NOTICE(cdev,
1074
+ "Non valid LLH protocol filter type %d\n", type);
1075
+ return -EINVAL;
1076
+ }
1077
+
1078
+ return 0;
1079
+}
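Worked examples of the high/low encoding above (the values follow directly from the switch cases):

	/* Illustrative expected outputs:
	 *   ETHERTYPE 0x8906                    -> high = 0x8906, low = 0x0
	 *   TCP_SRC_PORT 0x1234                 -> high = 0x0, low = 0x12340000
	 *   UDP_DEST_PORT 0x5678                -> high = 0x0, low = 0x00005678
	 *   TCP_SRC_AND_DEST_PORT 0x1234/0x5678 -> low = 0x12345678
	 */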
1080
+
1081
+int
1082
+qed_llh_add_protocol_filter(struct qed_dev *cdev,
1083
+ u8 ppfid,
1084
+ enum qed_llh_prot_filter_type_t type,
1085
+ u16 source_port_or_eth_type, u16 dest_port)
1086
+{
1087
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1088
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
1089
+ u8 filter_idx, abs_ppfid, str[32], type_bitmap;
1090
+ union qed_llh_filter filter = {};
1091
+ u32 high, low, ref_cnt;
1092
+ int rc = 0;
1093
+
1094
+ if (!p_ptt)
1095
+ return -EAGAIN;
1096
+
1097
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
1098
+ goto out;
1099
+
1100
+ rc = qed_llh_protocol_filter_stringify(cdev, type,
1101
+ source_port_or_eth_type,
1102
+ dest_port, str, sizeof(str));
1103
+ if (rc)
1104
+ goto err;
1105
+
1106
+ filter.protocol.type = type;
1107
+ filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
1108
+ filter.protocol.dest_port = dest_port;
1109
+ rc = qed_llh_shadow_add_filter(cdev,
1110
+ ppfid,
1111
+ QED_LLH_FILTER_TYPE_PROTOCOL,
1112
+ &filter, &filter_idx, &ref_cnt);
1113
+ if (rc)
1114
+ goto err;
1115
+
1116
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
1117
+ if (rc)
1118
+ goto err;
1119
+
1120
+ /* Configure the LLH only in case of a new filter */
1121
+ if (ref_cnt == 1) {
1122
+ rc = qed_llh_protocol_filter_to_hilo(cdev, type,
1123
+ source_port_or_eth_type,
1124
+ dest_port, &high, &low);
1125
+ if (rc)
1126
+ goto err;
1127
+
1128
+ type_bitmap = 0x1 << type;
1129
+ rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid,
1130
+ filter_idx, type_bitmap, high, low);
1131
+ if (rc)
1132
+ goto err;
1133
+ }
1134
+
1135
+ DP_VERBOSE(cdev,
1136
+ QED_MSG_SP,
1137
+ "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
1138
+ str, ppfid, abs_ppfid, filter_idx, ref_cnt);
1139
+
1140
+ goto out;
1141
+
1142
+err: DP_NOTICE(p_hwfn,
1143
+ "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n",
1144
+ str, ppfid);
1145
+out:
1146
+ qed_ptt_release(p_hwfn, p_ptt);
1147
+
1148
+ return rc;
1149
+}
1150
+
1151
+void qed_llh_remove_mac_filter(struct qed_dev *cdev,
1152
+ u8 ppfid, u8 mac_addr[ETH_ALEN])
1153
+{
1154
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1155
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
1156
+ union qed_llh_filter filter = {};
1157
+ u8 filter_idx, abs_ppfid;
1158
+ int rc = 0;
1159
+ u32 ref_cnt;
1160
+
1161
+ if (!p_ptt)
1162
+ return;
1163
+
1164
+ if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
1165
+ goto out;
1166
+
1167
+ ether_addr_copy(filter.mac.addr, mac_addr);
1168
+ rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
1169
+ &ref_cnt);
1170
+ if (rc)
1171
+ goto err;
1172
+
1173
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
1174
+ if (rc)
1175
+ goto err;
1176
+
1177
+ /* Remove from the LLH in case the filter is not in use */
1178
+ if (!ref_cnt) {
1179
+ rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
1180
+ filter_idx);
1181
+ if (rc)
1182
+ goto err;
1183
+ }
1184
+
1185
+ DP_VERBOSE(cdev,
1186
+ QED_MSG_SP,
1187
+ "LLH: Removed MAC filter [%pM] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
1188
+ mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);
1189
+
1190
+ goto out;
1191
+
1192
+err: DP_NOTICE(cdev,
1193
+ "LLH: Failed to remove MAC filter [%pM] from ppfid %hhd\n",
1194
+ mac_addr, ppfid);
1195
+out:
1196
+ qed_ptt_release(p_hwfn, p_ptt);
1197
+}
1198
+
1199
+void qed_llh_remove_protocol_filter(struct qed_dev *cdev,
1200
+ u8 ppfid,
1201
+ enum qed_llh_prot_filter_type_t type,
1202
+ u16 source_port_or_eth_type, u16 dest_port)
1203
+{
1204
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1205
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
1206
+ u8 filter_idx, abs_ppfid, str[32];
1207
+ union qed_llh_filter filter = {};
1208
+ int rc = 0;
1209
+ u32 ref_cnt;
1210
+
1211
+ if (!p_ptt)
1212
+ return;
1213
+
1214
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
1215
+ goto out;
1216
+
1217
+ rc = qed_llh_protocol_filter_stringify(cdev, type,
1218
+ source_port_or_eth_type,
1219
+ dest_port, str, sizeof(str));
1220
+ if (rc)
1221
+ goto err;
1222
+
1223
+ filter.protocol.type = type;
1224
+ filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
1225
+ filter.protocol.dest_port = dest_port;
1226
+ rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
1227
+ &ref_cnt);
1228
+ if (rc)
1229
+ goto err;
1230
+
1231
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
1232
+ if (rc)
1233
+ goto err;
1234
+
1235
+ /* Remove from the LLH in case the filter is not in use */
1236
+ if (!ref_cnt) {
1237
+ rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
1238
+ filter_idx);
1239
+ if (rc)
1240
+ goto err;
1241
+ }
1242
+
1243
+ DP_VERBOSE(cdev,
1244
+ QED_MSG_SP,
1245
+ "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
1246
+ str, ppfid, abs_ppfid, filter_idx, ref_cnt);
1247
+
1248
+ goto out;
1249
+
1250
+err: DP_NOTICE(cdev,
1251
+ "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n",
1252
+ str, ppfid);
1253
+out:
1254
+ qed_ptt_release(p_hwfn, p_ptt);
1255
+}
1256
+
1257
+/******************************* NIG LLH - End ********************************/
681258
691259 #define QED_MIN_DPIS (4)
701260 #define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
....@@ -144,8 +1334,16 @@
1441334 qm_info->wfq_data = NULL;
1451335 }
1461336
1337
+static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
1338
+{
1339
+ kfree(p_hwfn->dbg_user_info);
1340
+ p_hwfn->dbg_user_info = NULL;
1341
+}
1342
+
1471343 void qed_resc_free(struct qed_dev *cdev)
1481344 {
1345
+ struct qed_rdma_info *rdma_info;
1346
+ struct qed_hwfn *p_hwfn;
1491347 int i;
1501348
1511349 if (IS_VF(cdev)) {
....@@ -160,8 +1358,11 @@
1601358 kfree(cdev->reset_stats);
1611359 cdev->reset_stats = NULL;
1621360
1361
+ qed_llh_free(cdev);
1362
+
1631363 for_each_hwfn(cdev, i) {
164
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1364
+ p_hwfn = cdev->hwfns + i;
1365
+ rdma_info = p_hwfn->p_rdma_info;
1651366
1661367 qed_cxt_mngr_free(p_hwfn);
1671368 qed_qm_info_free(p_hwfn);
....@@ -180,13 +1381,20 @@
1801381 qed_ooo_free(p_hwfn);
1811382 }
1821383
183
- if (QED_IS_RDMA_PERSONALITY(p_hwfn))
1384
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
1385
+ qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
1841386 qed_rdma_info_free(p_hwfn);
1387
+ }
1851388
1861389 qed_iov_free(p_hwfn);
1871390 qed_l2_free(p_hwfn);
1881391 qed_dmae_info_free(p_hwfn);
1891392 qed_dcbx_info_free(p_hwfn);
1393
+ qed_dbg_user_data_free(p_hwfn);
1394
+ qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
1395
+
1396
+ /* Destroy doorbell recovery mechanism */
1397
+ qed_db_recovery_teardown(p_hwfn);
1901398 }
1911399 }
1921400
....@@ -343,7 +1551,7 @@
3431551
3441552 /* all vports participate in weighted fair queueing */
3451553 for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
346
- qm_info->qm_vport_params[i].vport_wfq = 1;
1554
+ qm_info->qm_vport_params[i].wfq = 1;
3471555 }
3481556
3491557 /* initialize qm port params */
....@@ -351,6 +1559,7 @@
3511559 {
3521560 /* Initialize qm port parameters */
3531561 u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
1562
+ struct qed_dev *cdev = p_hwfn->cdev;
3541563
3551564 /* indicate how ooo and high pri traffic is dealt with */
3561565 active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
....@@ -360,11 +1569,13 @@
3601569 for (i = 0; i < num_ports; i++) {
3611570 struct init_qm_port_params *p_qm_port =
3621571 &p_hwfn->qm_info.qm_port_params[i];
1572
+ u16 pbf_max_cmd_lines;
3631573
3641574 p_qm_port->active = 1;
3651575 p_qm_port->active_phys_tcs = active_phys_tcs;
366
- p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
367
- p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
1576
+ pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(cdev);
1577
+ p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports;
1578
+ p_qm_port->num_btb_blocks = NUM_OF_BTB_BLOCKS(cdev) / num_ports;
3681579 }
3691580 }
3701581
....@@ -740,7 +1951,7 @@
7401951 return 0;
7411952
7421953 if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
743
- p_hwfn->hw_info.multi_tc_roce_en = 0;
1954
+ p_hwfn->hw_info.multi_tc_roce_en = false;
7441955 DP_NOTICE(p_hwfn,
7451956 "multi-tc roce was disabled to reduce requested amount of pqs\n");
7461957 if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
....@@ -806,9 +2017,8 @@
8062017 vport = &(qm_info->qm_vport_params[i]);
8072018 DP_VERBOSE(p_hwfn,
8082019 NETIF_MSG_HW,
809
- "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
810
- qm_info->start_vport + i,
811
- vport->vport_rl, vport->vport_wfq);
2020
+ "vport idx %d, wfq %d, first_tx_pq_id [ ",
2021
+ qm_info->start_vport + i, vport->wfq);
8122022 for (tc = 0; tc < NUM_OF_TCS; tc++)
8132023 DP_VERBOSE(p_hwfn,
8142024 NETIF_MSG_HW,
....@@ -821,11 +2031,11 @@
8212031 pq = &(qm_info->qm_pq_params[i]);
8222032 DP_VERBOSE(p_hwfn,
8232033 NETIF_MSG_HW,
824
- "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
2034
+ "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d rl_id %d\n",
8252035 qm_info->start_pq + i,
8262036 pq->port_id,
8272037 pq->vport_id,
828
- pq->tc_id, pq->wrr_group, pq->rl_valid);
2038
+ pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id);
8292039 }
8302040 }
8312041
....@@ -874,9 +2084,6 @@
8742084 spin_unlock_bh(&qm_lock);
8752085 if (!b_rc)
8762086 return -EINVAL;
877
-
878
- /* clear the QM_PF runtime phase leftovers from previous init */
879
- qed_init_clear_rt_data(p_hwfn);
8802087
8812088 /* prepare QM portion of runtime array */
8822089 qed_qm_init_pf(p_hwfn, p_ptt, false);
....@@ -962,6 +2169,11 @@
9622169 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
9632170 u32 n_eqes, num_cons;
9642171
2172
+ /* Initialize the doorbell recovery mechanism */
2173
+ rc = qed_db_recovery_setup(p_hwfn);
2174
+ if (rc)
2175
+ goto alloc_err;
2176
+
9652177 /* First allocate the context manager structure */
9662178 rc = qed_cxt_mngr_alloc(p_hwfn);
9672179 if (rc)
....@@ -1036,6 +2248,7 @@
10362248 /* EQ */
10372249 n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
10382250 if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
2251
+ u32 n_srq = qed_cxt_get_total_srq_count(p_hwfn);
10392252 enum protocol_type rdma_proto;
10402253
10412254 if (QED_IS_ROCE_PERSONALITY(p_hwfn))
....@@ -1046,7 +2259,10 @@
10462259 num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
10472260 rdma_proto,
10482261 NULL) * 2;
1049
- n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
2262
+ /* EQ should be able to get events from all SRQ's
2263
+ * at the same time
2264
+ */
2265
+ n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
10502266 } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
10512267 num_cons =
10522268 qed_cxt_get_proto_cid_count(p_hwfn,
....@@ -1112,6 +2328,17 @@
11122328 rc = qed_dcbx_info_alloc(p_hwfn);
11132329 if (rc)
11142330 goto alloc_err;
2331
+
2332
+ rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info);
2333
+ if (rc)
2334
+ goto alloc_err;
2335
+ }
2336
+
2337
+ rc = qed_llh_alloc(cdev);
2338
+ if (rc) {
2339
+ DP_NOTICE(cdev,
2340
+ "Failed to allocate memory for the llh_info structure\n");
2341
+ goto alloc_err;
11152342 }
11162343
11172344 cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
....@@ -1379,7 +2606,7 @@
13792606 params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
13802607 params.pf_rl_en = qm_info->pf_rl_en;
13812608 params.pf_wfq_en = qm_info->pf_wfq_en;
1382
- params.vport_rl_en = qm_info->vport_rl_en;
2609
+ params.global_rl_en = qm_info->vport_rl_en;
13832610 params.vport_wfq_en = qm_info->vport_wfq_en;
13842611 params.port_params = qm_info->qm_port_params;
13852612
....@@ -1457,6 +2684,14 @@
14572684 QED_ROCE_EDPM_MODE_DISABLE = 2,
14582685 };
14592686
2687
+bool qed_edpm_enabled(struct qed_hwfn *p_hwfn)
2688
+{
2689
+ if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm)
2690
+ return false;
2691
+
2692
+ return true;
2693
+}
2694
+
14602695 static int
14612696 qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
14622697 {
....@@ -1526,13 +2761,13 @@
15262761 p_hwfn->wid_count = (u16) n_cpus;
15272762
15282763 DP_INFO(p_hwfn,
1529
- "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
2764
+ "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
15302765 norm_regsize,
15312766 pwm_regsize,
15322767 p_hwfn->dpi_size,
15332768 p_hwfn->dpi_count,
1534
- ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
1535
- "disabled" : "enabled");
2769
+ (!qed_edpm_enabled(p_hwfn)) ?
2770
+ "disabled" : "enabled", PAGE_SIZE);
15362771
15372772 if (rc) {
15382773 DP_ERR(p_hwfn,
....@@ -1556,6 +2791,10 @@
15562791 struct qed_ptt *p_ptt, int hw_mode)
15572792 {
15582793 int rc = 0;
2794
+
2795
+ /* In CMT the gate should be cleared by the 2nd hwfn */
2796
+ if (!QED_IS_CMT(p_hwfn->cdev) || !IS_LEAD_HWFN(p_hwfn))
2797
+ STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
15592798
15602799 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
15612800 if (rc)
....@@ -1620,11 +2859,6 @@
16202859 (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
16212860 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
16222861
1623
- /* Cleanup chip from previous driver if such remains exist */
1624
- rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
1625
- if (rc)
1626
- return rc;
1627
-
16282862 /* Sanity check before the PF init sequence that uses DMAE */
16292863 rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
16302864 if (rc)
....@@ -1640,12 +2874,21 @@
16402874 if (rc)
16412875 return rc;
16422876
2877
+ qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem);
2878
+
16432879 /* Pure runtime initializations - directly to the HW */
16442880 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
16452881
16462882 rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
16472883 if (rc)
16482884 return rc;
2885
+
2886
+ /* Use the leading hwfn since in CMT only NIG #0 is operational */
2887
+ if (IS_LEAD_HWFN(p_hwfn)) {
2888
+ rc = qed_llh_hw_init_pf(p_hwfn, p_ptt);
2889
+ if (rc)
2890
+ return rc;
2891
+ }
16492892
16502893 if (b_hw_start) {
16512894 /* enable interrupts */
....@@ -1668,17 +2911,15 @@
16682911 return rc;
16692912 }
16702913
1671
-static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
1672
- struct qed_ptt *p_ptt,
1673
- u8 enable)
2914
+int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
2915
+ struct qed_ptt *p_ptt, bool b_enable)
16742916 {
1675
- u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
2917
+ u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;
16762918
1677
- /* Change PF in PXP */
1678
- qed_wr(p_hwfn, p_ptt,
1679
- PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
2919
+ /* Configure the PF's internal FID_enable for master transactions */
2920
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
16802921
1681
- /* wait until value is set - try for 1 second every 50us */
2922
+ /* Wait until value is set - try for 1 second every 50us */
16822923 for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
16832924 val = qed_rd(p_hwfn, p_ptt,
16842925 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
....@@ -1732,14 +2973,22 @@
17322973 return 0;
17332974 }
17342975
2976
+static void qed_pglueb_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2977
+{
2978
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
2979
+ BIT(p_hwfn->abs_pf_id));
2980
+}
2981
+
17352982 int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
17362983 {
17372984 struct qed_load_req_params load_req_params;
17382985 u32 load_code, resp, param, drv_mb_param;
17392986 bool b_default_mtu = true;
17402987 struct qed_hwfn *p_hwfn;
1741
- int rc = 0, mfw_rc, i;
2988
+ const u32 *fw_overlays;
2989
+ u32 fw_overlays_len;
17422990 u16 ether_type;
2991
+ int rc = 0, i;
17432992
17442993 if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
17452994 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
....@@ -1753,7 +3002,7 @@
17533002 }
17543003
17553004 for_each_hwfn(cdev, i) {
1756
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3005
+ p_hwfn = &cdev->hwfns[i];
17573006
17583007 /* If management didn't provide a default, set one of our own */
17593008 if (!p_hwfn->hw_info.mtu) {
....@@ -1765,9 +3014,6 @@
17653014 qed_vf_start(p_hwfn, p_params);
17663015 continue;
17673016 }
1768
-
1769
- /* Enable DMAE in PXP */
1770
- rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
17713017
17723018 rc = qed_calc_hw_mode(p_hwfn);
17733019 if (rc)
....@@ -1805,12 +3051,57 @@
18053051 "Load request was sent. Load code: 0x%x\n",
18063052 load_code);
18073053
3054
+ /* Only relevant for recovery:
3055
+ * Clear the indication after the MFW has responded to LOAD_REQ.
3056
+ */
3057
+ cdev->recov_in_prog = false;
3058
+
18083059 qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
18093060
18103061 qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
18113062
1812
- p_hwfn->first_on_engine = (load_code ==
1813
- FW_MSG_CODE_DRV_LOAD_ENGINE);
3063
+ /* Clean up chip from previous driver if such remains exist.
3064
+ * This is not needed when the PF is the first one on the
3065
+ * engine, since afterwards we are going to init the FW.
3066
+ */
3067
+ if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
3068
+ rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
3069
+ p_hwfn->rel_pf_id, false);
3070
+ if (rc) {
3071
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_main_ptt,
3072
+ QED_HW_ERR_RAMROD_FAIL,
3073
+ "Final cleanup failed\n");
3074
+ goto load_err;
3075
+ }
3076
+ }
3077
+
3078
+ /* Log and clear previous pglue_b errors if such exist */
3079
+ qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);
3080
+
3081
+ /* Enable the PF's internal FID_enable in the PXP */
3082
+ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
3083
+ true);
3084
+ if (rc)
3085
+ goto load_err;
3086
+
3087
+ /* Clear the pglue_b was_error indication.
3088
+ * In E4 it must be done after the BME and the internal
3089
+ * FID_enable for the PF are set, since VDMs may cause the
3090
+ * indication to be set again.
3091
+ */
3092
+ qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
3093
+
3094
+ fw_overlays = cdev->fw_data->fw_overlays;
3095
+ fw_overlays_len = cdev->fw_data->fw_overlays_len;
3096
+ p_hwfn->fw_overlay_mem =
3097
+ qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays,
3098
+ fw_overlays_len);
3099
+ if (!p_hwfn->fw_overlay_mem) {
3100
+ DP_NOTICE(p_hwfn,
3101
+ "Failed to allocate fw overlay memory\n");
3102
+ rc = -ENOMEM;
3103
+ goto load_err;
3104
+ }
18143105
18153106 switch (load_code) {
18163107 case FW_MSG_CODE_DRV_LOAD_ENGINE:
....@@ -1818,14 +3109,14 @@
18183109 p_hwfn->hw_info.hw_mode);
18193110 if (rc)
18203111 break;
1821
- /* Fall through */
3112
+ fallthrough;
18223113 case FW_MSG_CODE_DRV_LOAD_PORT:
18233114 rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
18243115 p_hwfn->hw_info.hw_mode);
18253116 if (rc)
18263117 break;
18273118
1828
- /* Fall through */
3119
+ fallthrough;
18293120 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
18303121 rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
18313122 p_params->p_tunn,
....@@ -1841,39 +3132,29 @@
18413132 break;
18423133 }
18433134
1844
- if (rc)
3135
+ if (rc) {
18453136 DP_NOTICE(p_hwfn,
18463137 "init phase failed for loadcode 0x%x (rc %d)\n",
1847
- load_code, rc);
1848
-
1849
- /* ACK mfw regardless of success or failure of initialization */
1850
- mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1851
- DRV_MSG_CODE_LOAD_DONE,
1852
- 0, &load_code, &param);
1853
- if (rc)
1854
- return rc;
1855
- if (mfw_rc) {
1856
- DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
1857
- return mfw_rc;
3138
+ load_code, rc);
3139
+ goto load_err;
18583140 }
18593141
1860
- /* Check if there is a DID mismatch between nvm-cfg/efuse */
1861
- if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1862
- DP_NOTICE(p_hwfn,
1863
- "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
3142
+ rc = qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
3143
+ if (rc)
3144
+ return rc;
18643145
18653146 /* send DCBX attention request command */
18663147 DP_VERBOSE(p_hwfn,
18673148 QED_MSG_DCB,
18683149 "sending phony dcbx set command to trigger DCBx attention handling\n");
1869
- mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1870
- DRV_MSG_CODE_SET_DCBX,
1871
- 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
1872
- &load_code, &param);
1873
- if (mfw_rc) {
3150
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
3151
+ DRV_MSG_CODE_SET_DCBX,
3152
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
3153
+ &resp, &param);
3154
+ if (rc) {
18743155 DP_NOTICE(p_hwfn,
18753156 "Failed to send DCBX attention request\n");
1876
- return mfw_rc;
3157
+ return rc;
18773158 }
18783159
18793160 p_hwfn->hw_init_done = true;
....@@ -1922,6 +3203,12 @@
19223203 }
19233204
19243205 return 0;
3206
+
3207
+load_err:
3208
+ /* The MFW load lock should be released also when initialization fails.
3209
+ */
3210
+ qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
3211
+ return rc;
19253212 }
19263213
19273214 #define QED_HW_STOP_RETRY_LIMIT (10)
....@@ -1933,6 +3220,9 @@
19333220 /* close timers */
19343221 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
19353222 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
3223
+
3224
+ if (cdev->recov_in_prog)
3225
+ return;
19363226
19373227 for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
19383228 if ((!qed_rd(p_hwfn, p_ptt,
....@@ -1996,12 +3286,14 @@
19963286 p_hwfn->hw_init_done = false;
19973287
19983288 /* Send unload command to MCP */
1999
- rc = qed_mcp_unload_req(p_hwfn, p_ptt);
2000
- if (rc) {
2001
- DP_NOTICE(p_hwfn,
2002
- "Failed sending a UNLOAD_REQ command. rc = %d.\n",
2003
- rc);
2004
- rc2 = -EINVAL;
3289
+ if (!cdev->recov_in_prog) {
3290
+ rc = qed_mcp_unload_req(p_hwfn, p_ptt);
3291
+ if (rc) {
3292
+ DP_NOTICE(p_hwfn,
3293
+ "Failed sending a UNLOAD_REQ command. rc = %d.\n",
3294
+ rc);
3295
+ rc2 = -EINVAL;
3296
+ }
20053297 }
20063298
20073299 qed_slowpath_irq_sync(p_hwfn);
@@ -2043,27 +3335,37 @@
		qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);

-		qed_mcp_unload_done(p_hwfn, p_ptt);
-		if (rc) {
-			DP_NOTICE(p_hwfn,
-				  "Failed sending a UNLOAD_DONE command. rc = %d.\n",
-				  rc);
-			rc2 = -EINVAL;
+		if (IS_LEAD_HWFN(p_hwfn) &&
+		    test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) &&
+		    !QED_IS_FCOE_PERSONALITY(p_hwfn))
+			qed_llh_remove_mac_filter(cdev, 0,
+						  p_hwfn->hw_info.hw_mac_addr);
+
+		if (!cdev->recov_in_prog) {
+			rc = qed_mcp_unload_done(p_hwfn, p_ptt);
+			if (rc) {
+				DP_NOTICE(p_hwfn,
+					  "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+					  rc);
+				rc2 = -EINVAL;
+			}
		}
	}

-	if (IS_PF(cdev)) {
+	if (IS_PF(cdev) && !cdev->recov_in_prog) {
		p_hwfn = QED_LEADING_HWFN(cdev);
		p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;

-		/* Disable DMAE in PXP - in CMT, this should only be done for
-		 * first hw-function, and only after all transactions have
-		 * stopped for all active hw-functions.
+		/* Clear the PF's internal FID_enable in the PXP.
+		 * In CMT this should only be done for the first hw-function,
+		 * and only after all transactions have stopped for all active
+		 * hw-functions.
		 */
-		rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false);
+		rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
		if (rc) {
			DP_NOTICE(p_hwfn,
-				  "qed_change_pci_hwfn failed. rc = %d.\n", rc);
+				  "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
+				  rc);
			rc2 = -EINVAL;
		}
	}
21633465 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
21643466 }
21653467
2166
- /* Clean Previous errors if such exist */
2167
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
2168
- PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
3468
+ /* Clean previous pglue_b errors if such exist */
3469
+ qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
21693470
21703471 /* enable internal target-read */
21713472 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -2266,8 +3567,10 @@
		return "RDMA_CNQ_RAM";
	case QED_ILT:
		return "ILT";
-	case QED_LL2_QUEUE:
-		return "LL2_QUEUE";
+	case QED_LL2_RAM_QUEUE:
+		return "LL2_RAM_QUEUE";
+	case QED_LL2_CTX_QUEUE:
+		return "LL2_CTX_QUEUE";
	case QED_CMDQS_CQS:
		return "CMDQS_CQS";
	case QED_RDMA_STATS_QUEUE:
@@ -2306,18 +3609,46 @@
	return 0;
}

+static u32 qed_hsi_def_val[][MAX_CHIP_IDS] = {
+	{MAX_NUM_VFS_BB, MAX_NUM_VFS_K2},
+	{MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2},
+	{MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2},
+	{MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2},
+	{MAX_NUM_PFS_BB, MAX_NUM_PFS_K2},
+	{MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2},
+	{ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2},
+	{MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2},
+	{PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2},
+	{RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2},
+	{MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS},
+	{PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES},
+	{BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2},
+};
+
+u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
+{
+	enum chip_ids chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
+
+	if (type >= QED_NUM_HSI_DEFS) {
+		DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type);
+		return 0;
+	}
+
+	return qed_hsi_def_val[type][chip_id];
+}
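qed_get_hsi_def_val() centralizes the per-chip (BB vs. K2) maxima that the rest of this patch stops open-coding with QED_IS_AH() ternaries. The NUM_OF_*() helpers used below are presumably thin wrappers around it; a sketch under that assumption — the QED_HSI_DEF_* enumerator names are illustrative and do not appear in this diff:

	/* Assumed shape of the NUM_OF_*() wrappers, e.g.: */
	#define NUM_OF_L2_QUEUES(dev) \
		qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
	#define NUM_OF_VPORTS(dev) \
		qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)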
static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
-	bool b_ah = QED_IS_AH(p_hwfn->cdev);
	u32 resc_max_val, mcp_resp;
	u8 res_id;
	int rc;
-
	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
		switch (res_id) {
-		case QED_LL2_QUEUE:
-			resc_max_val = MAX_NUM_LL2_RX_QUEUES;
+		case QED_LL2_RAM_QUEUE:
+			resc_max_val = MAX_NUM_LL2_RX_RAM_QUEUES;
+			break;
+		case QED_LL2_CTX_QUEUE:
+			resc_max_val = MAX_NUM_LL2_RX_CTX_QUEUES;
			break;
		case QED_RDMA_CNQ_RAM:
			/* No need for a case for QED_CMDQS_CQS since
@@ -2326,8 +3657,8 @@
			resc_max_val = NUM_OF_GLOBAL_QUEUES;
			break;
		case QED_RDMA_STATS_QUEUE:
-			resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
-					    : RDMA_NUM_STATISTIC_COUNTERS_BB;
+			resc_max_val =
+			    NUM_OF_RDMA_STATISTIC_COUNTERS(p_hwfn->cdev);
			break;
		case QED_BDQ:
			resc_max_val = BDQ_NUM_RESOURCES;
@@ -2360,28 +3691,24 @@
			 u32 *p_resc_num, u32 *p_resc_start)
{
	u8 num_funcs = p_hwfn->num_funcs_on_engine;
-	bool b_ah = QED_IS_AH(p_hwfn->cdev);
+	struct qed_dev *cdev = p_hwfn->cdev;

	switch (res_id) {
	case QED_L2_QUEUE:
-		*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
-			       MAX_NUM_L2_QUEUES_BB) / num_funcs;
+		*p_resc_num = NUM_OF_L2_QUEUES(cdev) / num_funcs;
		break;
	case QED_VPORT:
-		*p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
-			       MAX_NUM_VPORTS_BB) / num_funcs;
+		*p_resc_num = NUM_OF_VPORTS(cdev) / num_funcs;
		break;
	case QED_RSS_ENG:
-		*p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
-			       ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+		*p_resc_num = NUM_OF_RSS_ENGINES(cdev) / num_funcs;
		break;
	case QED_PQ:
-		*p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
-			       MAX_QM_TX_QUEUES_BB) / num_funcs;
+		*p_resc_num = NUM_OF_QM_TX_QUEUES(cdev) / num_funcs;
		*p_resc_num &= ~0x7;	/* The granularity of the PQs is 8 */
		break;
	case QED_RL:
-		*p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+		*p_resc_num = NUM_OF_QM_GLOBAL_RLS(cdev) / num_funcs;
		break;
	case QED_MAC:
	case QED_VLAN:
@@ -2389,11 +3716,13 @@
		*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
		break;
	case QED_ILT:
-		*p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
-			       PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+		*p_resc_num = NUM_OF_PXP_ILT_RECORDS(cdev) / num_funcs;
		break;
-	case QED_LL2_QUEUE:
-		*p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+	case QED_LL2_RAM_QUEUE:
+		*p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs;
+		break;
+	case QED_LL2_CTX_QUEUE:
+		*p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
@@ -2401,8 +3730,7 @@
		*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
		break;
	case QED_RDMA_STATS_QUEUE:
-		*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
-			       RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
+		*p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(cdev) / num_funcs;
		break;
	case QED_BDQ:
		if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
25153843 return 0;
25163844 }
25173845
3846
+static int qed_hw_get_ppfid_bitmap(struct qed_hwfn *p_hwfn,
3847
+ struct qed_ptt *p_ptt)
3848
+{
3849
+ struct qed_dev *cdev = p_hwfn->cdev;
3850
+ u8 native_ppfid_idx;
3851
+ int rc;
3852
+
3853
+ /* Calculation of BB/AH is different for native_ppfid_idx */
3854
+ if (QED_IS_BB(cdev))
3855
+ native_ppfid_idx = p_hwfn->rel_pf_id;
3856
+ else
3857
+ native_ppfid_idx = p_hwfn->rel_pf_id /
3858
+ cdev->num_ports_in_engine;
3859
+
3860
+ rc = qed_mcp_get_ppfid_bitmap(p_hwfn, p_ptt);
3861
+ if (rc != 0 && rc != -EOPNOTSUPP)
3862
+ return rc;
3863
+ else if (rc == -EOPNOTSUPP)
3864
+ cdev->ppfid_bitmap = 0x1 << native_ppfid_idx;
3865
+
3866
+ if (!(cdev->ppfid_bitmap & (0x1 << native_ppfid_idx))) {
3867
+ DP_INFO(p_hwfn,
3868
+ "Fix the PPFID bitmap to include the native PPFID [native_ppfid_idx %hhd, orig_bitmap 0x%hhx]\n",
3869
+ native_ppfid_idx, cdev->ppfid_bitmap);
3870
+ cdev->ppfid_bitmap = 0x1 << native_ppfid_idx;
3871
+ }
3872
+
3873
+ return 0;
3874
+}
3875
+
25183876 static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
25193877 {
25203878 struct qed_resc_unlock_params resc_unlock_params;
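On BB the native PPFID is simply the relative PF id, while on AH it is scaled down by the number of ports per engine. A worked example with assumed values:

	/* Hypothetical AH configuration: 2 ports per engine, rel_pf_id = 5.
	 * native_ppfid_idx = 5 / 2 = 2, so if qed_mcp_get_ppfid_bitmap()
	 * returns -EOPNOTSUPP the fallback bitmap is 0x1 << 2 = 0x4.
	 */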
@@ -2572,6 +3930,13 @@
			"Failed to release the resource lock for the resource allocation commands\n");
	}

+	/* PPFID bitmap */
+	if (IS_LEAD_HWFN(p_hwfn)) {
+		rc = qed_hw_get_ppfid_bitmap(p_hwfn, p_ptt);
+		if (rc)
+			return rc;
+	}
+
	/* Sanity for ILT */
	if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
	    (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
@@ -2603,10 +3968,12 @@

static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
-	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
+	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fld;
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+	struct qed_mcp_link_speed_params *ext_speed;
	struct qed_mcp_link_capabilities *p_caps;
	struct qed_mcp_link_params *link;
+	int i;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
@@ -2629,37 +3996,21 @@
	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
-		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
-		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
-		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
-		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
-		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
-		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
-		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
-		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
-		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
-		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
-		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
-		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
-		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
-		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
-		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
-		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
-		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
-		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
-		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
-		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
-		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1:
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1:
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2:
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2:
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
@@ -2677,8 +4028,7 @@
	link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
	link->speed.advertised_speeds = link_temp;

-	link_temp = link->speed.advertised_speeds;
-	p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
+	p_caps->speed_capabilities = link->speed.advertised_speeds;

	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
@@ -2693,6 +4043,9 @@
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
+		break;
+	case NVM_CFG1_PORT_DRV_LINK_SPEED_20G:
+		link->speed.forced_speed = 20000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
@@ -2710,18 +4063,39 @@
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
	}

-	p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
-		link->speed.autoneg;
+	p_caps->default_speed_autoneg = link->speed.autoneg;

-	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
-	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
-	link->pause.autoneg = !!(link_temp &
-				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
-	link->pause.forced_rx = !!(link_temp &
-				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
-	link->pause.forced_tx = !!(link_temp &
-				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+	fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL);
+	link->pause.autoneg = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+	link->pause.forced_rx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+	link->pause.forced_tx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;
+
+	if (p_hwfn->mcp_info->capabilities &
+	    FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
+		switch (GET_MFW_FIELD(link_temp,
+				      NVM_CFG1_PORT_FEC_FORCE_MODE)) {
+		case NVM_CFG1_PORT_FEC_FORCE_MODE_NONE:
+			p_caps->fec_default |= QED_FEC_MODE_NONE;
+			break;
+		case NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE:
+			p_caps->fec_default |= QED_FEC_MODE_FIRECODE;
+			break;
+		case NVM_CFG1_PORT_FEC_FORCE_MODE_RS:
+			p_caps->fec_default |= QED_FEC_MODE_RS;
+			break;
+		case NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO:
+			p_caps->fec_default |= QED_FEC_MODE_AUTO;
+			break;
+		default:
+			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+				   "unknown FEC mode in 0x%08x\n", link_temp);
+		}
+	} else {
+		p_caps->fec_default = QED_FEC_MODE_UNSUPPORTED;
+	}
+
+	link->fec = p_caps->fec_default;

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
		link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr +
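The flow-control and FEC parsing above switches from explicit mask/shift pairs to the GET_MFW_FIELD() helper. Its conventional definition follows the usual <FIELD>_MASK / <FIELD>_OFFSET macro-pair naming; a sketch, stated as an assumption rather than a quote from this tree:

	/* Assumed helper semantics: extract a named bit-field */
	#define GET_MFW_FIELD(name, field) \
		(((name) & (field ## _MASK)) >> (field ## _OFFSET))

so that, for example, GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL) yields just the flow-control bits shifted down to bit 0.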
@@ -2754,14 +4128,97 @@
		p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED;
	}

-	DP_VERBOSE(p_hwfn,
-		   NETIF_MSG_LINK,
-		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
-		   link->speed.forced_speed,
-		   link->speed.advertised_speeds,
-		   link->speed.autoneg,
-		   link->pause.autoneg,
-		   p_caps->default_eee, p_caps->eee_lpi_timer);
+	if (p_hwfn->mcp_info->capabilities &
+	    FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
+		ext_speed = &link->ext_speed;
+
+		link_temp = qed_rd(p_hwfn, p_ptt,
+				   port_cfg_addr +
+				   offsetof(struct nvm_cfg1_port,
+					    extended_speed));
+
+		fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_EXTENDED_SPEED);
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN)
+			ext_speed->autoneg = true;
+
+		ext_speed->forced_speed = 0;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G)
+			ext_speed->forced_speed |= QED_EXT_SPEED_1G;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G)
+			ext_speed->forced_speed |= QED_EXT_SPEED_10G;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G)
+			ext_speed->forced_speed |= QED_EXT_SPEED_20G;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G)
+			ext_speed->forced_speed |= QED_EXT_SPEED_25G;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G)
+			ext_speed->forced_speed |= QED_EXT_SPEED_40G;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R)
+			ext_speed->forced_speed |= QED_EXT_SPEED_50G_R;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2)
+			ext_speed->forced_speed |= QED_EXT_SPEED_50G_R2;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2)
+			ext_speed->forced_speed |= QED_EXT_SPEED_100G_R2;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4)
+			ext_speed->forced_speed |= QED_EXT_SPEED_100G_R4;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4)
+			ext_speed->forced_speed |= QED_EXT_SPEED_100G_P4;
+
+		fld = GET_MFW_FIELD(link_temp,
+				    NVM_CFG1_PORT_EXTENDED_SPEED_CAP);
+
+		ext_speed->advertised_speeds = 0;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED)
+			ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_RES;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G)
+			ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_1G;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G)
+			ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_10G;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G)
+			ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_20G;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G)
+			ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_25G;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G)
+			ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_40G;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R)
+			ext_speed->advertised_speeds |=
+			    QED_EXT_SPEED_MASK_50G_R;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2)
+			ext_speed->advertised_speeds |=
+			    QED_EXT_SPEED_MASK_50G_R2;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2)
+			ext_speed->advertised_speeds |=
+			    QED_EXT_SPEED_MASK_100G_R2;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4)
+			ext_speed->advertised_speeds |=
+			    QED_EXT_SPEED_MASK_100G_R4;
+		if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4)
+			ext_speed->advertised_speeds |=
+			    QED_EXT_SPEED_MASK_100G_P4;
+
+		link_temp = qed_rd(p_hwfn, p_ptt,
+				   port_cfg_addr +
+				   offsetof(struct nvm_cfg1_port,
+					    extended_fec_mode));
+		link->ext_fec_mode = link_temp;
+
+		p_caps->default_ext_speed_caps = ext_speed->advertised_speeds;
+		p_caps->default_ext_speed = ext_speed->forced_speed;
+		p_caps->default_ext_autoneg = ext_speed->autoneg;
+		p_caps->default_ext_fec = link->ext_fec_mode;
+
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Read default extended link config: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, FEC: 0x%02x\n",
+			   ext_speed->forced_speed,
+			   ext_speed->advertised_speeds, ext_speed->autoneg,
+			   p_caps->default_ext_fec);
+	}
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: 0x%02x [0x%08x usec], FEC: 0x%02x\n",
+		   link->speed.forced_speed, link->speed.advertised_speeds,
+		   link->speed.autoneg, link->pause.autoneg,
+		   p_caps->default_eee, p_caps->eee_lpi_timer,
+		   p_caps->fec_default);

	if (IS_LEAD_HWFN(p_hwfn)) {
		struct qed_dev *cdev = p_hwfn->cdev;
@@ -2784,18 +4241,21 @@
			cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
					BIT(QED_MF_LLH_PROTO_CLSS) |
					BIT(QED_MF_UFP_SPECIFIC) |
-					BIT(QED_MF_8021Q_TAGGING);
+					BIT(QED_MF_8021Q_TAGGING) |
+					BIT(QED_MF_DONT_ADD_VLAN0_TAG);
			break;
		case NVM_CFG1_GLOB_MF_MODE_BD:
			cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
					BIT(QED_MF_LLH_PROTO_CLSS) |
-					BIT(QED_MF_8021AD_TAGGING);
+					BIT(QED_MF_8021AD_TAGGING) |
+					BIT(QED_MF_DONT_ADD_VLAN0_TAG);
			break;
		case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
			cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
					BIT(QED_MF_LLH_PROTO_CLSS) |
					BIT(QED_MF_LL2_NON_UNICAST) |
-					BIT(QED_MF_INTER_PF_SWITCH);
+					BIT(QED_MF_INTER_PF_SWITCH) |
+					BIT(QED_MF_DISABLE_ARFS);
			break;
		case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
			cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
@@ -2808,6 +4268,14 @@

		DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
			cdev->mf_bits);
+
+		/* In CMT the PF is unknown when the GFS block processes the
+		 * packet. Therefore the searcher, which keeps a per-PF
+		 * database, cannot be used, and ARFS must be disabled.
+		 */
+		if (QED_IS_CMT(cdev))
+			cdev->mf_bits |= BIT(QED_MF_DISABLE_ARFS);
	}

	DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
@@ -2831,6 +4299,14 @@
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
		__set_bit(QED_DEV_CAP_ROCE,
			  &p_hwfn->hw_info.device_capabilities);
+
+	/* Read device serial number information from shmem */
+	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+	       offsetof(struct nvm_cfg1, glob) +
+	       offsetof(struct nvm_cfg1_glob, serial_number);
+
+	for (i = 0; i < 4; i++)
+		p_hwfn->hw_info.part_num[i] = qed_rd(p_hwfn, p_ptt, addr + i * 4);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
@@ -2896,55 +4372,43 @@
		   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}

-static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
-				    struct qed_ptt *p_ptt)
-{
-	u32 port_mode;
-
-	port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
-
-	if (port_mode < 3) {
-		p_hwfn->cdev->num_ports_in_engine = 1;
-	} else if (port_mode <= 5) {
-		p_hwfn->cdev->num_ports_in_engine = 2;
-	} else {
-		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
-			  p_hwfn->cdev->num_ports_in_engine);
-
-		/* Default num_ports_in_engine to something */
-		p_hwfn->cdev->num_ports_in_engine = 1;
-	}
-}
-
-static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
-				    struct qed_ptt *p_ptt)
-{
-	u32 port;
-	int i;
-
-	p_hwfn->cdev->num_ports_in_engine = 0;
-
-	for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
-		port = qed_rd(p_hwfn, p_ptt,
-			      CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
-		if (port & 1)
-			p_hwfn->cdev->num_ports_in_engine++;
-	}
-
-	if (!p_hwfn->cdev->num_ports_in_engine) {
-		DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
-
-		/* Default num_ports_in_engine to something */
-		p_hwfn->cdev->num_ports_in_engine = 1;
-	}
-}
-
static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
-	if (QED_IS_BB(p_hwfn->cdev))
-		qed_hw_info_port_num_bb(p_hwfn, p_ptt);
-	else
-		qed_hw_info_port_num_ah(p_hwfn, p_ptt);
+	u32 addr, global_offsize, global_addr, port_mode;
+	struct qed_dev *cdev = p_hwfn->cdev;
+
+	/* In CMT there is always only one port */
+	if (cdev->num_hwfns > 1) {
+		cdev->num_ports_in_engine = 1;
+		cdev->num_ports = 1;
+		return;
+	}
+
+	/* Determine the number of ports per engine */
+	port_mode = qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE);
+	switch (port_mode) {
+	case 0x0:
+		cdev->num_ports_in_engine = 1;
+		break;
+	case 0x1:
+		cdev->num_ports_in_engine = 2;
+		break;
+	case 0x2:
+		cdev->num_ports_in_engine = 4;
+		break;
+	default:
+		DP_NOTICE(p_hwfn, "Unknown port mode 0x%08x\n", port_mode);
+		cdev->num_ports_in_engine = 1;	/* Default to something */
+		break;
+	}
+
+	/* Get the total number of ports of the device */
+	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+				    PUBLIC_GLOBAL);
+	global_offsize = qed_rd(p_hwfn, p_ptt, addr);
+	global_addr = SECTION_ADDR(global_offsize, 0);
+	addr = global_addr + offsetof(struct public_global, max_ports);
+	cdev->num_ports = (u8)qed_rd(p_hwfn, p_ptt, addr);
}

static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
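The rewritten qed_hw_info_port_num() also reads the device-wide port count from the PUBLIC_GLOBAL shmem section, using a two-step lookup. A commented restatement of that sequence, under the assumed offsize convention (a packed offset/size word per public section; the SECTION_* macro internals are not shown in this diff):

	/* Assumed semantics: SECTION_OFFSIZE_ADDR() locates the packed
	 * offset/size word of a public section, and SECTION_ADDR() decodes
	 * that word into the address of instance 0 of the section.
	 */
	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
				    PUBLIC_GLOBAL);
	global_offsize = qed_rd(p_hwfn, p_ptt, addr);	/* packed word */
	global_addr = SECTION_ADDR(global_offsize, 0);	/* section base */
	cdev->num_ports = (u8)qed_rd(p_hwfn, p_ptt,
				     global_addr +
				     offsetof(struct public_global,
					      max_ports));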
@@ -2982,7 +4446,8 @@
		return rc;
	}

-	qed_hw_info_port_num(p_hwfn, p_ptt);
+	if (IS_LEAD_HWFN(p_hwfn))
+		qed_hw_info_port_num(p_hwfn, p_ptt);

	qed_mcp_get_capabilities(p_hwfn, p_ptt);

@@ -3018,7 +4483,7 @@
	}

	if (QED_IS_ROCE_PERSONALITY(p_hwfn))
-		p_hwfn->hw_info.multi_tc_roce_en = 1;
+		p_hwfn->hw_info.multi_tc_roce_en = true;

	p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
	p_hwfn->hw_info.num_active_tc = 1;
@@ -3087,18 +4552,13 @@
	return 0;
}

-static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
-{
-	kfree(p_hwfn->nvm_info.image_att);
-	p_hwfn->nvm_info.image_att = NULL;
-}
-
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
				 void __iomem *p_regview,
				 void __iomem *p_doorbells,
				 u64 db_phys_addr,
				 enum qed_pci_personality personality)
{
+	struct qed_dev *cdev = p_hwfn->cdev;
	int rc = 0;

	/* Split PCI bars evenly between hwfns */
@@ -3152,7 +4612,7 @@
	/* Sending a mailbox to the MFW should be done after qed_get_hw_info()
	 * is called, as it sets the ports number in an engine.
	 */
-	if (IS_LEAD_HWFN(p_hwfn)) {
+	if (IS_LEAD_HWFN(p_hwfn) && !cdev->recov_in_prog) {
		rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
@@ -3176,7 +4636,7 @@
	return rc;
err3:
	if (IS_LEAD_HWFN(p_hwfn))
-		qed_nvm_info_free(p_hwfn);
+		qed_mcp_nvm_info_free(p_hwfn);
err2:
	if (IS_LEAD_HWFN(p_hwfn))
		qed_iov_free_hw_info(p_hwfn->cdev);
@@ -3237,7 +4697,7 @@
	if (rc) {
		if (IS_PF(cdev)) {
			qed_init_free(p_hwfn);
-			qed_nvm_info_free(p_hwfn);
+			qed_mcp_nvm_info_free(p_hwfn);
			qed_mcp_free(p_hwfn);
			qed_hw_hwfn_free(p_hwfn);
		}
@@ -3271,280 +4731,7 @@

	qed_iov_free_hw_info(cdev);

-	qed_nvm_info_free(p_hwfn);
-}
-
-static void qed_chain_free_next_ptr(struct qed_dev *cdev,
-				    struct qed_chain *p_chain)
-{
-	void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
-	dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
-	struct qed_chain_next *p_next;
-	u32 size, i;
-
-	if (!p_virt)
-		return;
-
-	size = p_chain->elem_size * p_chain->usable_per_page;
-
-	for (i = 0; i < p_chain->page_cnt; i++) {
-		if (!p_virt)
-			break;
-
-		p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
-		p_virt_next = p_next->next_virt;
-		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
-
-		dma_free_coherent(&cdev->pdev->dev,
-				  QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
-
-		p_virt = p_virt_next;
-		p_phys = p_phys_next;
-	}
-}
-
-static void qed_chain_free_single(struct qed_dev *cdev,
-				  struct qed_chain *p_chain)
-{
-	if (!p_chain->p_virt_addr)
-		return;
-
-	dma_free_coherent(&cdev->pdev->dev,
-			  QED_CHAIN_PAGE_SIZE,
-			  p_chain->p_virt_addr, p_chain->p_phys_addr);
-}
-
-static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
-	struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl;
-	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
-
-	if (!pp_addr_tbl)
-		return;
-
-	for (i = 0; i < page_cnt; i++) {
-		if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map)
-			break;
-
-		dma_free_coherent(&cdev->pdev->dev,
-				  QED_CHAIN_PAGE_SIZE,
-				  pp_addr_tbl[i].virt_addr,
-				  pp_addr_tbl[i].dma_map);
-	}
-
-	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-
-	if (!p_chain->b_external_pbl)
-		dma_free_coherent(&cdev->pdev->dev,
-				  pbl_size,
-				  p_chain->pbl_sp.p_virt_table,
-				  p_chain->pbl_sp.p_phys_table);
-
-	vfree(p_chain->pbl.pp_addr_tbl);
-	p_chain->pbl.pp_addr_tbl = NULL;
-}
-
-void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
-	switch (p_chain->mode) {
-	case QED_CHAIN_MODE_NEXT_PTR:
-		qed_chain_free_next_ptr(cdev, p_chain);
-		break;
-	case QED_CHAIN_MODE_SINGLE:
-		qed_chain_free_single(cdev, p_chain);
-		break;
-	case QED_CHAIN_MODE_PBL:
-		qed_chain_free_pbl(cdev, p_chain);
-		break;
-	}
-}
-
-static int
-qed_chain_alloc_sanity_check(struct qed_dev *cdev,
-			     enum qed_chain_cnt_type cnt_type,
-			     size_t elem_size, u32 page_cnt)
-{
-	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
-
-	/* The actual chain size can be larger than the maximal possible value
-	 * after rounding up the requested elements number to pages, and after
-	 * taking into account the unusable elements (next-ptr elements).
-	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
-	 * size/capacity fields are of a u32 type.
-	 */
-	if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
-	     chain_size > ((u32)U16_MAX + 1)) ||
-	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
-		DP_NOTICE(cdev,
-			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
-			  chain_size);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int
-qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
-	void *p_virt = NULL, *p_virt_prev = NULL;
-	dma_addr_t p_phys = 0;
-	u32 i;
-
-	for (i = 0; i < p_chain->page_cnt; i++) {
-		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
-					    QED_CHAIN_PAGE_SIZE,
-					    &p_phys, GFP_KERNEL);
-		if (!p_virt)
-			return -ENOMEM;
-
-		if (i == 0) {
-			qed_chain_init_mem(p_chain, p_virt, p_phys);
-			qed_chain_reset(p_chain);
-		} else {
-			qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
-						     p_virt, p_phys);
-		}
-
-		p_virt_prev = p_virt;
-	}
-	/* Last page's next element should point to the beginning of the
-	 * chain.
-	 */
-	qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
-				     p_chain->p_virt_addr,
-				     p_chain->p_phys_addr);
-
-	return 0;
-}
-
-static int
-qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
-	dma_addr_t p_phys = 0;
-	void *p_virt = NULL;
-
-	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
-				    QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
-	if (!p_virt)
-		return -ENOMEM;
-
-	qed_chain_init_mem(p_chain, p_virt, p_phys);
-	qed_chain_reset(p_chain);
-
-	return 0;
-}
-
-static int
-qed_chain_alloc_pbl(struct qed_dev *cdev,
-		    struct qed_chain *p_chain,
-		    struct qed_chain_ext_pbl *ext_pbl)
-{
-	u32 page_cnt = p_chain->page_cnt, size, i;
-	dma_addr_t p_phys = 0, p_pbl_phys = 0;
-	struct addr_tbl_entry *pp_addr_tbl;
-	u8 *p_pbl_virt = NULL;
-	void *p_virt = NULL;
-
-	size = page_cnt * sizeof(*pp_addr_tbl);
-	pp_addr_tbl = vzalloc(size);
-	if (!pp_addr_tbl)
-		return -ENOMEM;
-
-	/* The allocation of the PBL table is done with its full size, since it
-	 * is expected to be successive.
-	 * qed_chain_init_pbl_mem() is called even in a case of an allocation
-	 * failure, since tbl was previously allocated, and it
-	 * should be saved to allow its freeing during the error flow.
-	 */
-	size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-
-	if (!ext_pbl) {
-		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
-						size, &p_pbl_phys, GFP_KERNEL);
-	} else {
-		p_pbl_virt = ext_pbl->p_pbl_virt;
-		p_pbl_phys = ext_pbl->p_pbl_phys;
-		p_chain->b_external_pbl = true;
-	}
-
-	qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl);
-	if (!p_pbl_virt)
-		return -ENOMEM;
-
-	for (i = 0; i < page_cnt; i++) {
-		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
-					    QED_CHAIN_PAGE_SIZE,
-					    &p_phys, GFP_KERNEL);
-		if (!p_virt)
-			return -ENOMEM;
-
-		if (i == 0) {
-			qed_chain_init_mem(p_chain, p_virt, p_phys);
-			qed_chain_reset(p_chain);
-		}
-
-		/* Fill the PBL table with the physical address of the page */
-		*(dma_addr_t *)p_pbl_virt = p_phys;
-		/* Keep the virtual address of the page */
-		p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt;
-		p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys;
-
-		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
-	}
-
-	return 0;
-}
-
-int qed_chain_alloc(struct qed_dev *cdev,
-		    enum qed_chain_use_mode intended_use,
-		    enum qed_chain_mode mode,
-		    enum qed_chain_cnt_type cnt_type,
-		    u32 num_elems,
-		    size_t elem_size,
-		    struct qed_chain *p_chain,
-		    struct qed_chain_ext_pbl *ext_pbl)
-{
-	u32 page_cnt;
-	int rc = 0;
-
-	if (mode == QED_CHAIN_MODE_SINGLE)
-		page_cnt = 1;
-	else
-		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
-
-	rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
-	if (rc) {
-		DP_NOTICE(cdev,
-			  "Cannot allocate a chain with the given arguments:\n");
-		DP_NOTICE(cdev,
-			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
-			  intended_use, mode, cnt_type, num_elems, elem_size);
-		return rc;
-	}
-
-	qed_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
-			      mode, cnt_type);
-
-	switch (mode) {
-	case QED_CHAIN_MODE_NEXT_PTR:
-		rc = qed_chain_alloc_next_ptr(cdev, p_chain);
-		break;
-	case QED_CHAIN_MODE_SINGLE:
-		rc = qed_chain_alloc_single(cdev, p_chain);
-		break;
-	case QED_CHAIN_MODE_PBL:
-		rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl);
-		break;
-	}
-	if (rc)
-		goto nomem;
-
-	return 0;
-
-nomem:
-	qed_chain_free(cdev, p_chain);
-	return rc;
+	qed_mcp_nvm_info_free(p_hwfn);
}

int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
@@ -3602,269 +4789,6 @@
	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

	return 0;
-}
-
-static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
-				  u8 *p_filter)
-{
-	*p_high = p_filter[1] | (p_filter[0] << 8);
-	*p_low = p_filter[5] | (p_filter[4] << 8) |
-		 (p_filter[3] << 16) | (p_filter[2] << 24);
-}
-
-int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
-			   struct qed_ptt *p_ptt, u8 *p_filter)
-{
-	u32 high = 0, low = 0, en;
-	int i;
-
-	if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
-		return 0;
-
-	qed_llh_mac_to_filter(&high, &low, p_filter);
-
-	/* Find a free entry and utilize it */
-	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
-		en = qed_rd(p_hwfn, p_ptt,
-			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
-		if (en)
-			continue;
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_VALUE +
-		       2 * i * sizeof(u32), low);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_VALUE +
-		       (2 * i + 1) * sizeof(u32), high);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
-		       i * sizeof(u32), 0);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
-		break;
-	}
-	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
-		DP_NOTICE(p_hwfn,
-			  "Failed to find an empty LLH filter to utilize\n");
-		return -EINVAL;
-	}
-
-	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
-		   "mac: %pM is added at %d\n",
-		   p_filter, i);
-
-	return 0;
-}
-
-void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
-			       struct qed_ptt *p_ptt, u8 *p_filter)
-{
-	u32 high = 0, low = 0;
-	int i;
-
-	if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
-		return;
-
-	qed_llh_mac_to_filter(&high, &low, p_filter);
-
-	/* Find the entry and clean it */
-	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
-		if (qed_rd(p_hwfn, p_ptt,
-			   NIG_REG_LLH_FUNC_FILTER_VALUE +
-			   2 * i * sizeof(u32)) != low)
-			continue;
-		if (qed_rd(p_hwfn, p_ptt,
-			   NIG_REG_LLH_FUNC_FILTER_VALUE +
-			   (2 * i + 1) * sizeof(u32)) != high)
-			continue;
-
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_VALUE +
-		       (2 * i + 1) * sizeof(u32), 0);
-
-		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
-			   "mac: %pM is removed from %d\n",
-			   p_filter, i);
-		break;
-	}
-	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
-		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
-}
-
-int
-qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
-			    struct qed_ptt *p_ptt,
-			    u16 source_port_or_eth_type,
-			    u16 dest_port, enum qed_llh_port_filter_type_t type)
-{
-	u32 high = 0, low = 0, en;
-	int i;
-
-	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
-		return 0;
-
-	switch (type) {
-	case QED_LLH_FILTER_ETHERTYPE:
-		high = source_port_or_eth_type;
-		break;
-	case QED_LLH_FILTER_TCP_SRC_PORT:
-	case QED_LLH_FILTER_UDP_SRC_PORT:
-		low = source_port_or_eth_type << 16;
-		break;
-	case QED_LLH_FILTER_TCP_DEST_PORT:
-	case QED_LLH_FILTER_UDP_DEST_PORT:
-		low = dest_port;
-		break;
-	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
-	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
-		low = (source_port_or_eth_type << 16) | dest_port;
-		break;
-	default:
-		DP_NOTICE(p_hwfn,
-			  "Non valid LLH protocol filter type %d\n", type);
-		return -EINVAL;
-	}
-	/* Find a free entry and utilize it */
-	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
-		en = qed_rd(p_hwfn, p_ptt,
-			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
-		if (en)
-			continue;
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_VALUE +
-		       2 * i * sizeof(u32), low);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_VALUE +
-		       (2 * i + 1) * sizeof(u32), high);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
-		       i * sizeof(u32), 1 << type);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
-		break;
-	}
-	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
-		DP_NOTICE(p_hwfn,
-			  "Failed to find an empty LLH filter to utilize\n");
-		return -EINVAL;
-	}
-	switch (type) {
-	case QED_LLH_FILTER_ETHERTYPE:
-		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
-			   "ETH type %x is added at %d\n",
-			   source_port_or_eth_type, i);
-		break;
-	case QED_LLH_FILTER_TCP_SRC_PORT:
-		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
-			   "TCP src port %x is added at %d\n",
-			   source_port_or_eth_type, i);
-		break;
-	case QED_LLH_FILTER_UDP_SRC_PORT:
-		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
-			   "UDP src port %x is added at %d\n",
-			   source_port_or_eth_type, i);
-		break;
-	case QED_LLH_FILTER_TCP_DEST_PORT:
-		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
-			   "TCP dst port %x is added at %d\n", dest_port, i);
-		break;
-	case QED_LLH_FILTER_UDP_DEST_PORT:
-		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
-			   "UDP dst port %x is added at %d\n", dest_port, i);
-		break;
-	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
-		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
-			   "TCP src/dst ports %x/%x are added at %d\n",
-			   source_port_or_eth_type, dest_port, i);
-		break;
-	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
-		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
-			   "UDP src/dst ports %x/%x are added at %d\n",
-			   source_port_or_eth_type, dest_port, i);
-		break;
-	}
-	return 0;
-}
-
-void
-qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
-			       struct qed_ptt *p_ptt,
-			       u16 source_port_or_eth_type,
-			       u16 dest_port,
-			       enum qed_llh_port_filter_type_t type)
-{
-	u32 high = 0, low = 0;
-	int i;
-
-	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
-		return;
-
-	switch (type) {
-	case QED_LLH_FILTER_ETHERTYPE:
-		high = source_port_or_eth_type;
-		break;
-	case QED_LLH_FILTER_TCP_SRC_PORT:
-	case QED_LLH_FILTER_UDP_SRC_PORT:
-		low = source_port_or_eth_type << 16;
-		break;
-	case QED_LLH_FILTER_TCP_DEST_PORT:
-	case QED_LLH_FILTER_UDP_DEST_PORT:
-		low = dest_port;
-		break;
-	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
-	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
-		low = (source_port_or_eth_type << 16) | dest_port;
-		break;
-	default:
-		DP_NOTICE(p_hwfn,
-			  "Non valid LLH protocol filter type %d\n", type);
-		return;
-	}
-
-	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
-		if (!qed_rd(p_hwfn, p_ptt,
-			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
-			continue;
-		if (!qed_rd(p_hwfn, p_ptt,
-			    NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
-			continue;
-		if (!(qed_rd(p_hwfn, p_ptt,
-			     NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
-			     i * sizeof(u32)) & BIT(type)))
-			continue;
-		if (qed_rd(p_hwfn, p_ptt,
-			   NIG_REG_LLH_FUNC_FILTER_VALUE +
-			   2 * i * sizeof(u32)) != low)
-			continue;
-		if (qed_rd(p_hwfn, p_ptt,
-			   NIG_REG_LLH_FUNC_FILTER_VALUE +
-			   (2 * i + 1) * sizeof(u32)) != high)
-			continue;
-
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
-		       i * sizeof(u32), 0);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_FILTER_VALUE +
-		       (2 * i + 1) * sizeof(u32), 0);
-		break;
-	}
-
-	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
-		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
}

static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
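The per-hwfn LLH filter helpers removed above are superseded by a device-level API: earlier in this patch the hw-stop path calls qed_llh_remove_mac_filter(cdev, 0, p_hwfn->hw_info.hw_mac_addr), i.e. the replacement is assumed to take the qed_dev, a PPFID and the MAC rather than a hwfn/ptt pair. A sketch of the assumed new prototype; its implementation lives outside this file and is not shown in this diff:

	/* Assumed replacement prototype */
	int qed_llh_remove_mac_filter(struct qed_dev *cdev, u8 ppfid,
				      u8 mac_addr[ETH_ALEN]);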
@@ -4012,11 +4936,11 @@
	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;

-		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+		vport_params[i].wfq = (wfq_speed * QED_WFQ_UNIT) /
						min_pf_rate;
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
-				   vport_params[i].vport_wfq);
+				   vport_params[i].wfq);
	}
}

@@ -4027,7 +4951,7 @@
	int i;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
-		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+		p_hwfn->qm_info.qm_vport_params[i].wfq = 1;
}

static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
@@ -4043,7 +4967,7 @@
		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
-				   vport_params[i].vport_wfq);
+				   vport_params[i].wfq);
	}
}

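The renamed wfq field still carries the same derived weight: the vport's configured minimum speed scaled into WFQ units relative to the PF minimum rate. A worked example with an assumed QED_WFQ_UNIT of 10000:

	/* Example (values assumed): min_pf_rate = 10000 Mb/s and a vport
	 * minimum of 2500 Mb/s give
	 *	wfq = 2500 * 10000 / 10000 = 2500,
	 * i.e. one quarter of the WFQ unit range.
	 */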
@@ -4061,6 +4985,11 @@
	int non_requested_count = 0, req_count = 0, i, num_vports;

	num_vports = p_hwfn->qm_info.num_vports;
+
+	if (num_vports < 2) {
+		DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports);
+		return -EINVAL;
+	}

	/* Accounting for the vports which are configured for WFQ explicitly */
	for (i = 0; i < num_vports; i++) {
@@ -4387,23 +5316,9 @@
	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}

-int qed_device_num_engines(struct qed_dev *cdev)
+int qed_device_num_ports(struct qed_dev *cdev)
{
-	return QED_IS_BB(cdev) ? 2 : 1;
-}
-
-static int qed_device_num_ports(struct qed_dev *cdev)
-{
-	/* in CMT always only one port */
-	if (cdev->num_hwfns > 1)
-		return 1;
-
-	return cdev->num_ports_in_engine * qed_device_num_engines(cdev);
-}
-
-int qed_device_get_port_id(struct qed_dev *cdev)
-{
-	return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
+	return cdev->num_ports;
}

void qed_set_fw_mac_addr(__le16 *fw_msb,