forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/soc/fsl/dpio/qbman-portal.c
@@ -1,21 +1,17 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /*
  * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
  *
  */

 #include <asm/cacheflush.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <soc/fsl/dpaa2-global.h>

 #include "qbman-portal.h"
-
-#define QMAN_REV_4000 0x04000000
-#define QMAN_REV_4100 0x04010000
-#define QMAN_REV_4101 0x04010001
-#define QMAN_REV_MASK 0xffff0000

 /* All QBMan command and result structures use this "valid bit" encoding */
 #define QB_VALID_BIT ((u32)0x80)
@@ -25,10 +21,18 @@
 #define QBMAN_WQCHAN_CONFIGURE 0x46

 /* CINH register offsets */
+#define QBMAN_CINH_SWP_EQCR_PI 0x800
+#define QBMAN_CINH_SWP_EQCR_CI 0x840
 #define QBMAN_CINH_SWP_EQAR 0x8c0
+#define QBMAN_CINH_SWP_CR_RT 0x900
+#define QBMAN_CINH_SWP_VDQCR_RT 0x940
+#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
+#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
 #define QBMAN_CINH_SWP_DQPI 0xa00
 #define QBMAN_CINH_SWP_DCAP 0xac0
 #define QBMAN_CINH_SWP_SDQCR 0xb00
+#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
+#define QBMAN_CINH_SWP_RCR_PI 0xc00
 #define QBMAN_CINH_SWP_RAR 0xcc0
 #define QBMAN_CINH_SWP_ISR 0xe00
 #define QBMAN_CINH_SWP_IER 0xe40
@@ -42,6 +46,15 @@
 #define QBMAN_CENA_SWP_CR 0x600
 #define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
 #define QBMAN_CENA_SWP_VDQCR 0x780
+#define QBMAN_CENA_SWP_EQCR_CI 0x840
+#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840
+
+/* CENA register offsets in memory-backed mode */
+#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_CR_MEM 0x1600
+#define QBMAN_CENA_SWP_RR_MEM 0x1680
+#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780

 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
@@ -62,6 +75,12 @@
 /* opaque token for static dequeues */
 #define QMAN_SDQCR_TOKEN 0xbb

+#define QBMAN_EQCR_DCA_IDXMASK 0x0f
+#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)
+
+#define EQ_DESC_SIZE_WITHOUT_FD 29
+#define EQ_DESC_SIZE_FD_START 32
+
 enum qbman_sdqcr_dct {
         qbman_sdqcr_dct_null = 0,
         qbman_sdqcr_dct_prio_ics,
@@ -73,6 +92,82 @@
         qbman_sdqcr_fc_one = 0,
         qbman_sdqcr_fc_up_to_3 = 1
 };
+
+/* Internal function declarations */
+static int qbman_swp_enqueue_direct(struct qbman_swp *s,
+                                    const struct qbman_eq_desc *d,
+                                    const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+                                      const struct qbman_eq_desc *d,
+                                      const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+                                             const struct qbman_eq_desc *d,
+                                             const struct dpaa2_fd *fd,
+                                             uint32_t *flags,
+                                             int num_frames);
+static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+                                               const struct qbman_eq_desc *d,
+                                               const struct dpaa2_fd *fd,
+                                               uint32_t *flags,
+                                               int num_frames);
+static int
+qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+                                       const struct qbman_eq_desc *d,
+                                       const struct dpaa2_fd *fd,
+                                       int num_frames);
+static
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+                                             const struct qbman_eq_desc *d,
+                                             const struct dpaa2_fd *fd,
+                                             int num_frames);
+static int qbman_swp_pull_direct(struct qbman_swp *s,
+                                 struct qbman_pull_desc *d);
+static int qbman_swp_pull_mem_back(struct qbman_swp *s,
+                                   struct qbman_pull_desc *d);
+
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
+
+static int qbman_swp_release_direct(struct qbman_swp *s,
+                                    const struct qbman_release_desc *d,
+                                    const u64 *buffers,
+                                    unsigned int num_buffers);
+static int qbman_swp_release_mem_back(struct qbman_swp *s,
+                                      const struct qbman_release_desc *d,
+                                      const u64 *buffers,
+                                      unsigned int num_buffers);
+
+/* Function pointers */
+int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
+                             const struct qbman_eq_desc *d,
+                             const struct dpaa2_fd *fd)
+        = qbman_swp_enqueue_direct;
+
+int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+                                      const struct qbman_eq_desc *d,
+                                      const struct dpaa2_fd *fd,
+                                      uint32_t *flags,
+                                      int num_frames)
+        = qbman_swp_enqueue_multiple_direct;
+
+int
+(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+                                       const struct qbman_eq_desc *d,
+                                       const struct dpaa2_fd *fd,
+                                       int num_frames)
+        = qbman_swp_enqueue_multiple_desc_direct;
+
+int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
+        = qbman_swp_pull_direct;
+
+const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
+        = qbman_swp_dqrr_next_direct;
+
+int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+                             const struct qbman_release_desc *d,
+                             const u64 *buffers,
+                             unsigned int num_buffers)
+        = qbman_swp_release_direct;

 /* Portal Access */

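Note: the hunk above replaces the old directly-called portal operations with file-scope function pointers that default to the *_direct variants; qbman_swp_init() later repoints them at the *_mem_back variants when it detects a QMan 5.x portal. A minimal standalone sketch of that dispatch pattern follows; the struct and revision constants are simplified stand-ins, not the driver's real types.

    /* dispatch_sketch.c - version-selected dispatch, mirroring the pattern above */
    #include <stdio.h>
    #include <stdint.h>

    #define REV_MASK 0xffff0000u
    #define REV_5000 0x05000000u          /* stand-in for QMAN_REV_5000 */

    struct portal { uint32_t qman_version; };

    static int enqueue_direct(struct portal *p)   { (void)p; return printf("direct\n"); }
    static int enqueue_mem_back(struct portal *p) { (void)p; return printf("mem_back\n"); }

    /* default to the pre-5000 implementation, as the driver does */
    static int (*enqueue_ptr)(struct portal *p) = enqueue_direct;

    static void portal_init(struct portal *p)
    {
            if ((p->qman_version & REV_MASK) >= REV_5000)
                    enqueue_ptr = enqueue_mem_back;   /* rebind once at init */
    }

    int main(void)
    {
            struct portal p = { .qman_version = 0x05000000 };

            portal_init(&p);
            return enqueue_ptr(&p) < 0;   /* later callers all go through the pointer */
    }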
@@ -96,10 +191,13 @@

 #define SWP_CFG_DQRR_MF_SHIFT 20
 #define SWP_CFG_EST_SHIFT 16
+#define SWP_CFG_CPBS_SHIFT 15
 #define SWP_CFG_WN_SHIFT 14
 #define SWP_CFG_RPM_SHIFT 12
 #define SWP_CFG_DCM_SHIFT 10
 #define SWP_CFG_EPM_SHIFT 8
+#define SWP_CFG_VPM_SHIFT 7
+#define SWP_CFG_CPM_SHIFT 6
 #define SWP_CFG_SD_SHIFT 5
 #define SWP_CFG_SP_SHIFT 4
 #define SWP_CFG_SE_SHIFT 3
@@ -125,6 +223,17 @@
                                 ep << SWP_CFG_EP_SHIFT);
 }

+#define QMAN_RT_MODE 0x00000100
+
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+        /* 'first' is included, 'last' is excluded */
+        if (first <= last)
+                return last - first;
+        else
+                return (2 * ringsize) - (first - last);
+}
+
 /**
  * qbman_swp_init() - Create a functional object representing the given
  * QBMan portal descriptor.
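Note: qm_cyc_diff() returns how many entries lie between 'first' and 'last' when both indices carry an extra wrap bit on top of the ring index, which is why the wrapped case subtracts from 2 * ringsize rather than ringsize. A small standalone check of the arithmetic (example values are mine, not from the driver):

    #include <assert.h>
    #include <stdint.h>

    static inline uint8_t qm_cyc_diff(uint8_t ringsize, uint8_t first, uint8_t last)
    {
            /* 'first' is included, 'last' is excluded */
            if (first <= last)
                    return last - first;
            else
                    return (2 * ringsize) - (first - last);
    }

    int main(void)
    {
            /* no wrap: from index 3 up to (but excluding) index 7 is 4 entries */
            assert(qm_cyc_diff(32, 3, 7) == 4);
            /* wrapped: indices run 0..63 for a 32-entry ring (extra wrap bit),
             * so from 60 around to 2 is 64 - (60 - 2) = 6 entries */
            assert(qm_cyc_diff(32, 60, 2) == 6);
            return 0;
    }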
@@ -135,17 +244,24 @@
  */
 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
 {
-        struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
+        struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
         u32 reg;
+        u32 mask_size;
+        u32 eqcr_pi;

         if (!p)
                 return NULL;
+
+        spin_lock_init(&p->access_spinlock);
+
         p->desc = d;
         p->mc.valid_bit = QB_VALID_BIT;
         p->sdq = 0;
         p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
         p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
         p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+                p->mr.valid_bit = QB_VALID_BIT;

         atomic_set(&p->vdq.available, 1);
         p->vdq.valid_bit = QB_VALID_BIT;
@@ -163,26 +279,51 @@
         p->addr_cena = d->cena_bar;
         p->addr_cinh = d->cinh_bar;

-        reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
-                                1, /* Writes Non-cacheable */
-                                0, /* EQCR_CI stashing threshold */
-                                3, /* RPM: Valid bit mode, RCR in array mode */
-                                2, /* DCM: Discrete consumption ack mode */
-                                3, /* EPM: Valid bit mode, EQCR in array mode */
-                                0, /* mem stashing drop enable == FALSE */
-                                1, /* mem stashing priority == TRUE */
-                                0, /* mem stashing enable == FALSE */
-                                1, /* dequeue stashing priority == TRUE */
-                                0, /* dequeue stashing enable == FALSE */
-                                0); /* EQCR_CI stashing priority == FALSE */
+        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+
+                reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+                                        1, /* Writes Non-cacheable */
+                                        0, /* EQCR_CI stashing threshold */
+                                        3, /* RPM: RCR in array mode */
+                                        2, /* DCM: Discrete consumption ack */
+                                        2, /* EPM: EQCR in ring mode */
+                                        1, /* mem stashing drop enable */
+                                        1, /* mem stashing priority enable */
+                                        1, /* mem stashing enable */
+                                        1, /* dequeue stashing priority enable */
+                                        0, /* dequeue stashing enable */
+                                        0); /* EQCR_CI stashing priority enable */
+        } else {
+                memset(p->addr_cena, 0, 64 * 1024);
+                reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+                                        1, /* Writes Non-cacheable */
+                                        1, /* EQCR_CI stashing threshold */
+                                        3, /* RPM: RCR in array mode */
+                                        2, /* DCM: Discrete consumption ack */
+                                        0, /* EPM: EQCR in ring mode */
+                                        1, /* mem stashing drop enable */
+                                        1, /* mem stashing priority enable */
+                                        1, /* mem stashing enable */
+                                        1, /* dequeue stashing priority enable */
+                                        0, /* dequeue stashing enable */
+                                        0); /* EQCR_CI stashing priority enable */
+                reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
+                       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
+                       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */
+        }

         qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
         reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
         if (!reg) {
                 pr_err("qbman: the portal is not enabled!\n");
+                kfree(p);
                 return NULL;
         }

+        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+                qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
+                qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
+        }
         /*
          * SDQCR needs to be initialized to 0 when no channels are
          * being dequeued from or else the QMan HW will indicate an
@@ -190,6 +331,30 @@
          * applied when dequeues from a specific channel are enabled.
          */
         qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
+
+        p->eqcr.pi_ring_size = 8;
+        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+                p->eqcr.pi_ring_size = 32;
+                qbman_swp_enqueue_ptr =
+                        qbman_swp_enqueue_mem_back;
+                qbman_swp_enqueue_multiple_ptr =
+                        qbman_swp_enqueue_multiple_mem_back;
+                qbman_swp_enqueue_multiple_desc_ptr =
+                        qbman_swp_enqueue_multiple_desc_mem_back;
+                qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
+                qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
+                qbman_swp_release_ptr = qbman_swp_release_mem_back;
+        }
+
+        for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
+                p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
+        eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
+        p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
+        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
+        p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
+                        & p->eqcr.pi_ci_mask;
+        p->eqcr.available = p->eqcr.pi_ring_size;
+
         return p;
 }

@@ -277,7 +442,10 @@
  */
 void *qbman_swp_mc_start(struct qbman_swp *p)
 {
-        return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
+        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
+        else
+                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
 }

 /*
@@ -288,8 +456,14 @@
 {
         u8 *v = cmd;

-        dma_wmb();
-        *v = cmd_verb | p->mc.valid_bit;
+        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+                dma_wmb();
+                *v = cmd_verb | p->mc.valid_bit;
+        } else {
+                *v = cmd_verb | p->mc.valid_bit;
+                dma_wmb();
+                qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
+        }
 }

 /*
@@ -300,13 +474,27 @@
 {
         u32 *ret, verb;

-        ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+                /* Remove the valid-bit - command completed if the rest
+                 * is non-zero.
+                 */
+                verb = ret[0] & ~QB_VALID_BIT;
+                if (!verb)
+                        return NULL;
+                p->mc.valid_bit ^= QB_VALID_BIT;
+        } else {
+                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
+                /* Command completed if the valid bit is toggled */
+                if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
+                        return NULL;
+                /* Command completed if the rest is non-zero */
+                verb = ret[0] & ~QB_VALID_BIT;
+                if (!verb)
+                        return NULL;
+                p->mr.valid_bit ^= QB_VALID_BIT;
+        }

-        /* Remove the valid-bit - command completed if the rest is non-zero */
-        verb = ret[0] & ~QB_VALID_BIT;
-        if (!verb)
-                return NULL;
-        p->mc.valid_bit ^= QB_VALID_BIT;
         return ret;
 }

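Note: the two branches above detect management-command completion differently. Pre-5000 portals bounce the response between two fixed slots selected by mc.valid_bit, and completion shows up as a non-zero verb; memory-backed portals use one fixed response slot (QBMAN_CENA_SWP_RR_MEM) whose valid bit must have toggled to the polarity tracked in mr.valid_bit. A standalone sketch of the two checks on a fake response byte (helper names are mine, not the driver's):

    #include <assert.h>
    #include <stdint.h>

    #define QB_VALID_BIT ((uint32_t)0x80)

    /* pre-5000 style: any non-zero verb (valid bit stripped) means "done" */
    static int direct_done(uint32_t verb_byte)
    {
            return (verb_byte & ~QB_VALID_BIT) != 0;
    }

    /* memory-backed style: hardware flips the valid bit on each response,
     * so "done" also requires the stored bit to match the expected polarity */
    static int mem_back_done(uint32_t verb_byte, uint32_t expected_valid_bit)
    {
            if (expected_valid_bit != (verb_byte & QB_VALID_BIT))
                    return 0;
            return (verb_byte & ~QB_VALID_BIT) != 0;
    }

    int main(void)
    {
            assert(!direct_done(0x00));              /* still pending */
            assert(direct_done(0x80 | 0x45));        /* verb written -> complete */
            assert(!mem_back_done(0x45, 0x80));      /* right verb, wrong polarity */
            assert(mem_back_done(0x80 | 0x45, 0x80));
            return 0;
    }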
@@ -320,6 +508,7 @@
 #define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
 #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
 #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
+#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7

 /**
  * qbman_eq_desc_clear() - Clear the contents of a descriptor to
@@ -383,8 +572,9 @@
 #define EQAR_VB(eqar) ((eqar) & 0x80)
 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

+#define QB_RT_BIT ((u32)0x100)
 /**
- * qbman_swp_enqueue() - Issue an enqueue command
+ * qbman_swp_enqueue_direct() - Issue an enqueue command
  * @s: the software portal used for enqueue
  * @d: the enqueue descriptor
  * @fd: the frame descriptor to be enqueued
@@ -394,24 +584,345 @@
  *
  * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
  */
-int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
-                      const struct dpaa2_fd *fd)
+static
+int qbman_swp_enqueue_direct(struct qbman_swp *s,
+                             const struct qbman_eq_desc *d,
+                             const struct dpaa2_fd *fd)
 {
-        struct qbman_eq_desc *p;
-        u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
+        int flags = 0;
+        int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);

-        if (!EQAR_SUCCESS(eqar))
-                return -EBUSY;
+        if (ret >= 0)
+                ret = 0;
+        else
+                ret = -EBUSY;
+        return ret;
+}

-        p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
-        memcpy(&p->dca, &d->dca, 31);
-        memcpy(&p->fd, fd, sizeof(*fd));
+/**
+ * qbman_swp_enqueue_mem_back() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static
+int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+                               const struct qbman_eq_desc *d,
+                               const struct dpaa2_fd *fd)
+{
+        int flags = 0;
+        int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);
+
+        if (ret >= 0)
+                ret = 0;
+        else
+                ret = -EBUSY;
+        return ret;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+                                      const struct qbman_eq_desc *d,
+                                      const struct dpaa2_fd *fd,
+                                      uint32_t *flags,
+                                      int num_frames)
+{
+        uint32_t *p = NULL;
+        const uint32_t *cl = (uint32_t *)d;
+        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+        int i, num_enqueued = 0;
+
+        spin_lock(&s->access_spinlock);
+        half_mask = (s->eqcr.pi_ci_mask >> 1);
+        full_mask = s->eqcr.pi_ci_mask;
+
+        if (!s->eqcr.available) {
+                eqcr_ci = s->eqcr.ci;
+                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
+                s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+                s->eqcr.ci &= full_mask;
+
+                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+                                                eqcr_ci, s->eqcr.ci);
+                if (!s->eqcr.available) {
+                        spin_unlock(&s->access_spinlock);
+                        return 0;
+                }
+        }
+
+        eqcr_pi = s->eqcr.pi;
+        num_enqueued = (s->eqcr.available < num_frames) ?
+                        s->eqcr.available : num_frames;
+        s->eqcr.available -= num_enqueued;
+        /* Fill in the EQCR ring */
+        for (i = 0; i < num_enqueued; i++) {
+                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+                /* Skip copying the verb */
+                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+                memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
+                       &fd[i], sizeof(*fd));
+                eqcr_pi++;
+        }
+
+        dma_wmb();

         /* Set the verb byte, have to substitute in the valid-bit */
-        dma_wmb();
-        p->verb = d->verb | EQAR_VB(eqar);
+        eqcr_pi = s->eqcr.pi;
+        for (i = 0; i < num_enqueued; i++) {
+                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+                p[0] = cl[0] | s->eqcr.pi_vb;
+                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

-        return 0;
+                        d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+                                 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+                }
+                eqcr_pi++;
+                if (!(eqcr_pi & half_mask))
+                        s->eqcr.pi_vb ^= QB_VALID_BIT;
+        }
+
+        /* Flush all the cacheline without load/store in between */
+        eqcr_pi = s->eqcr.pi;
+        for (i = 0; i < num_enqueued; i++)
+                eqcr_pi++;
+        s->eqcr.pi = eqcr_pi & full_mask;
+        spin_unlock(&s->access_spinlock);
+
+        return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+                                        const struct qbman_eq_desc *d,
+                                        const struct dpaa2_fd *fd,
+                                        uint32_t *flags,
+                                        int num_frames)
+{
+        uint32_t *p = NULL;
+        const uint32_t *cl = (uint32_t *)(d);
+        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+        int i, num_enqueued = 0;
+        unsigned long irq_flags;
+
+        spin_lock_irqsave(&s->access_spinlock, irq_flags);
+
+        half_mask = (s->eqcr.pi_ci_mask >> 1);
+        full_mask = s->eqcr.pi_ci_mask;
+        if (!s->eqcr.available) {
+                eqcr_ci = s->eqcr.ci;
+                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
+                s->eqcr.ci = *p & full_mask;
+                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+                                                eqcr_ci, s->eqcr.ci);
+                if (!s->eqcr.available) {
+                        spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
+                        return 0;
+                }
+        }
+
+        eqcr_pi = s->eqcr.pi;
+        num_enqueued = (s->eqcr.available < num_frames) ?
+                        s->eqcr.available : num_frames;
+        s->eqcr.available -= num_enqueued;
+        /* Fill in the EQCR ring */
+        for (i = 0; i < num_enqueued; i++) {
+                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+                /* Skip copying the verb */
+                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+                memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
+                       &fd[i], sizeof(*fd));
+                eqcr_pi++;
+        }
+
+        /* Set the verb byte, have to substitute in the valid-bit */
+        eqcr_pi = s->eqcr.pi;
+        for (i = 0; i < num_enqueued; i++) {
+                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+                p[0] = cl[0] | s->eqcr.pi_vb;
+                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+                        d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+                                 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+                }
+                eqcr_pi++;
+                if (!(eqcr_pi & half_mask))
+                        s->eqcr.pi_vb ^= QB_VALID_BIT;
+        }
+        s->eqcr.pi = eqcr_pi & full_mask;
+
+        dma_wmb();
+        qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
+                             (QB_RT_BIT) | (s->eqcr.pi) | s->eqcr.pi_vb);
+        spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
+
+        return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+                                           const struct qbman_eq_desc *d,
+                                           const struct dpaa2_fd *fd,
+                                           int num_frames)
+{
+        uint32_t *p;
+        const uint32_t *cl;
+        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+        int i, num_enqueued = 0;
+
+        half_mask = (s->eqcr.pi_ci_mask >> 1);
+        full_mask = s->eqcr.pi_ci_mask;
+        if (!s->eqcr.available) {
+                eqcr_ci = s->eqcr.ci;
+                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
+                s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+                                                eqcr_ci, s->eqcr.ci);
+                if (!s->eqcr.available)
+                        return 0;
+        }
+
+        eqcr_pi = s->eqcr.pi;
+        num_enqueued = (s->eqcr.available < num_frames) ?
+                        s->eqcr.available : num_frames;
+        s->eqcr.available -= num_enqueued;
+        /* Fill in the EQCR ring */
+        for (i = 0; i < num_enqueued; i++) {
+                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+                cl = (uint32_t *)(&d[i]);
+                /* Skip copying the verb */
+                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+                memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
+                       &fd[i], sizeof(*fd));
+                eqcr_pi++;
+        }
+
+        dma_wmb();
+
+        /* Set the verb byte, have to substitute in the valid-bit */
+        eqcr_pi = s->eqcr.pi;
+        for (i = 0; i < num_enqueued; i++) {
+                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+                cl = (uint32_t *)(&d[i]);
+                p[0] = cl[0] | s->eqcr.pi_vb;
+                eqcr_pi++;
+                if (!(eqcr_pi & half_mask))
+                        s->eqcr.pi_vb ^= QB_VALID_BIT;
+        }
+
+        /* Flush all the cacheline without load/store in between */
+        eqcr_pi = s->eqcr.pi;
+        for (i = 0; i < num_enqueued; i++)
+                eqcr_pi++;
+        s->eqcr.pi = eqcr_pi & full_mask;
+
+        return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+                                             const struct qbman_eq_desc *d,
+                                             const struct dpaa2_fd *fd,
+                                             int num_frames)
+{
+        uint32_t *p;
+        const uint32_t *cl;
+        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+        int i, num_enqueued = 0;
+
+        half_mask = (s->eqcr.pi_ci_mask >> 1);
+        full_mask = s->eqcr.pi_ci_mask;
+        if (!s->eqcr.available) {
+                eqcr_ci = s->eqcr.ci;
+                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
+                s->eqcr.ci = *p & full_mask;
+                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+                                                eqcr_ci, s->eqcr.ci);
+                if (!s->eqcr.available)
+                        return 0;
+        }
+
+        eqcr_pi = s->eqcr.pi;
+        num_enqueued = (s->eqcr.available < num_frames) ?
+                        s->eqcr.available : num_frames;
+        s->eqcr.available -= num_enqueued;
+        /* Fill in the EQCR ring */
+        for (i = 0; i < num_enqueued; i++) {
+                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+                cl = (uint32_t *)(&d[i]);
+                /* Skip copying the verb */
+                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+                memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
+                       &fd[i], sizeof(*fd));
+                eqcr_pi++;
+        }
+
+        /* Set the verb byte, have to substitute in the valid-bit */
+        eqcr_pi = s->eqcr.pi;
+        for (i = 0; i < num_enqueued; i++) {
+                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+                cl = (uint32_t *)(&d[i]);
+                p[0] = cl[0] | s->eqcr.pi_vb;
+                eqcr_pi++;
+                if (!(eqcr_pi & half_mask))
+                        s->eqcr.pi_vb ^= QB_VALID_BIT;
+        }
+
+        s->eqcr.pi = eqcr_pi & full_mask;
+
+        dma_wmb();
+        qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
+                             (QB_RT_BIT) | (s->eqcr.pi) | s->eqcr.pi_vb);
+
+        return num_enqueued;
 }

 /* Static (push) dequeue */
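Note: in the multi-enqueue paths above, each entry of the optional flags[] array can request discrete consumption acknowledgement by OR-ing QBMAN_ENQUEUE_FLAG_DCA with a DQRR index in the low bits, and the EQCR is indexed with the lower half of pi_ci_mask while the valid bit flips each time the index wraps past that half-mask. A standalone sketch of both encodings (the values are illustrative only, not taken from hardware):

    #include <assert.h>
    #include <stdint.h>

    #define QBMAN_ENQUEUE_FLAG_DCA      (1ULL << 31)
    #define QBMAN_EQCR_DCA_IDXMASK      0x0f
    #define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7

    int main(void)
    {
            /* ask hardware to auto-consume DQRR entry 5 when this enqueue is seen */
            uint32_t flag = (uint32_t)QBMAN_ENQUEUE_FLAG_DCA | 5;
            uint8_t dca = 0;

            if (flag & QBMAN_ENQUEUE_FLAG_DCA)
                    dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                          (flag & QBMAN_EQCR_DCA_IDXMASK);
            assert(dca == 0x85);            /* enable bit + index 5 */

            /* 32-entry ring: full_mask 0x3f keeps the wrap bit, half_mask 0x1f
             * is the real ring index, so pi 0x20 lands on slot 0 again */
            uint32_t full_mask = 0x3f, half_mask = full_mask >> 1;
            assert(((0x20 & full_mask) & half_mask) == 0);
            return 0;
    }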
@@ -569,7 +1080,7 @@
 }

 /**
- * qbman_swp_pull() - Issue the pull dequeue command
+ * qbman_swp_pull_direct() - Issue the pull dequeue command
  * @s: the software portal object
  * @d: the software portal descriptor which has been configured with
  *     the set of qbman_pull_desc_set_*() calls
@@ -577,7 +1088,8 @@
  * Return 0 for success, and -EBUSY if the software portal is not ready
  * to do pull dequeue.
  */
-int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+static
+int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
 {
         struct qbman_pull_desc *p;

@@ -586,14 +1098,16 @@
                 return -EBUSY;
         }
         s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
-        p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+        else
+                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
         p->numf = d->numf;
         p->tok = QMAN_DQ_TOKEN_VALID;
         p->dq_src = d->dq_src;
         p->rsp_addr = d->rsp_addr;
         p->rsp_addr_virt = d->rsp_addr_virt;
         dma_wmb();
-
         /* Set the verb byte, have to substitute in the valid-bit */
         p->verb = d->verb | s->vdq.valid_bit;
         s->vdq.valid_bit ^= QB_VALID_BIT;
@@ -601,17 +1115,55 @@
         return 0;
 }

+/**
+ * qbman_swp_pull_mem_back() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ *     the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static
+int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+        struct qbman_pull_desc *p;
+
+        if (!atomic_dec_and_test(&s->vdq.available)) {
+                atomic_inc(&s->vdq.available);
+                return -EBUSY;
+        }
+        s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
+        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+        else
+                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
+        p->numf = d->numf;
+        p->tok = QMAN_DQ_TOKEN_VALID;
+        p->dq_src = d->dq_src;
+        p->rsp_addr = d->rsp_addr;
+        p->rsp_addr_virt = d->rsp_addr_virt;
+
+        /* Set the verb byte, have to substitute in the valid-bit */
+        p->verb = d->verb | s->vdq.valid_bit;
+        s->vdq.valid_bit ^= QB_VALID_BIT;
+        dma_wmb();
+        qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
+
+        return 0;
+}
+
 #define QMAN_DQRR_PI_MASK 0xf

 /**
- * qbman_swp_dqrr_next() - Get an valid DQRR entry
+ * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
  * @s: the software portal object
  *
  * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
  * only once, so repeated calls can return a sequence of DQRR entries, without
  * requiring they be consumed immediately or in any particular order.
  */
-const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
 {
         u32 verb;
         u32 response_verb;
@@ -655,6 +1207,98 @@
         }

         p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+        verb = p->dq.verb;
+
+        /*
+         * If the valid-bit isn't of the expected polarity, nothing there. Note,
+         * in the DQRR reset bug workaround, we shouldn't need to skip this
+         * check, because we've already determined that a new entry is available
+         * and we've invalidated the cacheline before reading it, so the
+         * valid-bit behaviour is repaired and should tell us what we already
+         * knew from reading PI.
+         */
+        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+                prefetch(qbman_get_cmd(s,
+                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+                return NULL;
+        }
+        /*
+         * There's something there. Move "next_idx" attention to the next ring
+         * entry (and prefetch it) before returning what we found.
+         */
+        s->dqrr.next_idx++;
+        s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
+        if (!s->dqrr.next_idx)
+                s->dqrr.valid_bit ^= QB_VALID_BIT;
+
+        /*
+         * If this is the final response to a volatile dequeue command
+         * indicate that the vdq is available
+         */
+        flags = p->dq.stat;
+        response_verb = verb & QBMAN_RESULT_MASK;
+        if ((response_verb == QBMAN_RESULT_DQ) &&
+            (flags & DPAA2_DQ_STAT_VOLATILE) &&
+            (flags & DPAA2_DQ_STAT_EXPIRED))
+                atomic_inc(&s->vdq.available);
+
+        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+
+        return p;
+}
+
+/**
+ * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
+{
+        u32 verb;
+        u32 response_verb;
+        u32 flags;
+        struct dpaa2_dq *p;
+
+        /* Before using valid-bit to detect if something is there, we have to
+         * handle the case of the DQRR reset bug...
+         */
+        if (unlikely(s->dqrr.reset_bug)) {
+                /*
+                 * We pick up new entries by cache-inhibited producer index,
+                 * which means that a non-coherent mapping would require us to
+                 * invalidate and read *only* once that PI has indicated that
+                 * there's an entry here. The first trip around the DQRR ring
+                 * will be much less efficient than all subsequent trips around
+                 * it...
+                 */
+                u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
+                        QMAN_DQRR_PI_MASK;
+
+                /* there are new entries if pi != next_idx */
+                if (pi == s->dqrr.next_idx)
+                        return NULL;
+
+                /*
+                 * if next_idx is/was the last ring index, and 'pi' is
+                 * different, we can disable the workaround as all the ring
+                 * entries have now been DMA'd to so valid-bit checking is
+                 * repaired. Note: this logic needs to be based on next_idx
+                 * (which increments one at a time), rather than on pi (which
+                 * can burst and wrap-around between our snapshots of it).
+                 */
+                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
+                        pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
+                                 s->dqrr.next_idx, pi);
+                        s->dqrr.reset_bug = 0;
+                }
+                prefetch(qbman_get_cmd(s,
+                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+        }
+
+        p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
         verb = p->dq.verb;

         /*
@@ -783,7 +1427,7 @@
 #define RAR_SUCCESS(rar) ((rar) & 0x100)

 /**
- * qbman_swp_release() - Issue a buffer release command
+ * qbman_swp_release_direct() - Issue a buffer release command
  * @s: the software portal object
  * @d: the release descriptor
  * @buffers: a pointer pointing to the buffer address to be released
@@ -791,8 +1435,9 @@
  *
  * Return 0 for success, -EBUSY if the release command ring is not ready.
  */
-int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
-                      const u64 *buffers, unsigned int num_buffers)
+int qbman_swp_release_direct(struct qbman_swp *s,
+                             const struct qbman_release_desc *d,
+                             const u64 *buffers, unsigned int num_buffers)
 {
         int i;
         struct qbman_release_desc *p;
@@ -807,17 +1452,58 @@

         /* Start the release command */
         p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+
         /* Copy the caller's buffer pointers to the command */
         for (i = 0; i < num_buffers; i++)
                 p->buf[i] = cpu_to_le64(buffers[i]);
         p->bpid = d->bpid;

         /*
-         * Set the verb byte, have to substitute in the valid-bit and the number
-         * of buffers.
+         * Set the verb byte, have to substitute in the valid-bit
+         * and the number of buffers.
          */
         dma_wmb();
         p->verb = d->verb | RAR_VB(rar) | num_buffers;
+
+        return 0;
+}
+
+/**
+ * qbman_swp_release_mem_back() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: a pointer pointing to the buffer address to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release_mem_back(struct qbman_swp *s,
+                               const struct qbman_release_desc *d,
+                               const u64 *buffers, unsigned int num_buffers)
+{
+        int i;
+        struct qbman_release_desc *p;
+        u32 rar;
+
+        if (!num_buffers || (num_buffers > 7))
+                return -EINVAL;
+
+        rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
+        if (!RAR_SUCCESS(rar))
+                return -EBUSY;
+
+        /* Start the release command */
+        p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+
+        /* Copy the caller's buffer pointers to the command */
+        for (i = 0; i < num_buffers; i++)
+                p->buf[i] = cpu_to_le64(buffers[i]);
+        p->bpid = d->bpid;
+
+        p->verb = d->verb | RAR_VB(rar) | num_buffers;
+        dma_wmb();
+        qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
+                             RAR_IDX(rar) * 4, QMAN_RT_MODE);

         return 0;
 }
@@ -1003,3 +1689,99 @@

         return 0;
 }
+
+#define QBMAN_RESPONSE_VERB_MASK 0x7f
+#define QBMAN_FQ_QUERY_NP 0x45
+#define QBMAN_BP_QUERY 0x32
+
+struct qbman_fq_query_desc {
+        u8 verb;
+        u8 reserved[3];
+        __le32 fqid;
+        u8 reserved2[56];
+};
+
+int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
+                         struct qbman_fq_query_np_rslt *r)
+{
+        struct qbman_fq_query_desc *p;
+        void *resp;
+
+        p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
+        if (!p)
+                return -EBUSY;
+
+        /* FQID is a 24 bit value */
+        p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
+        resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
+        if (!resp) {
+                pr_err("qbman: Query FQID %d NP fields failed, no response\n",
+                       fqid);
+                return -EIO;
+        }
+        *r = *(struct qbman_fq_query_np_rslt *)resp;
+        /* Decode the outcome */
+        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
+
+        /* Determine success or failure */
+        if (r->rslt != QBMAN_MC_RSLT_OK) {
+                pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
+                       p->fqid, r->rslt);
+                return -EIO;
+        }
+
+        return 0;
+}
+
+u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
+{
+        return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
+}
+
+u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
+{
+        return le32_to_cpu(r->byte_cnt);
+}
+
+struct qbman_bp_query_desc {
+        u8 verb;
+        u8 reserved;
+        __le16 bpid;
+        u8 reserved2[60];
+};
+
+int qbman_bp_query(struct qbman_swp *s, u16 bpid,
+                   struct qbman_bp_query_rslt *r)
+{
+        struct qbman_bp_query_desc *p;
+        void *resp;
+
+        p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
+        if (!p)
+                return -EBUSY;
+
+        p->bpid = cpu_to_le16(bpid);
+        resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
+        if (!resp) {
+                pr_err("qbman: Query BPID %d fields failed, no response\n",
+                       bpid);
+                return -EIO;
+        }
+        *r = *(struct qbman_bp_query_rslt *)resp;
+        /* Decode the outcome */
+        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);
+
+        /* Determine success or failure */
+        if (r->rslt != QBMAN_MC_RSLT_OK) {
+                pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
+                       bpid, r->rslt);
+                return -EIO;
+        }
+
+        return 0;
+}
+
+u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
+{
+        return le32_to_cpu(a->fill);
+}