2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/soc/fsl/dpio/dpio-service.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /*
  * Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
  *
  */
 #include <linux/types.h>
@@ -27,6 +27,7 @@
 	/* protect notifications list */
 	spinlock_t lock_notifications;
 	struct list_head notifications;
+	struct device *dev;
 };

 struct dpaa2_io_store {
@@ -57,8 +58,8 @@
 	 * If cpu == -1, choose the current cpu, with no guarantees about
 	 * potentially being migrated away.
 	 */
-	if (unlikely(cpu < 0))
-		cpu = smp_processor_id();
+	if (cpu < 0)
+		cpu = raw_smp_processor_id();

 	/* If a specific cpu was requested, pick it up immediately */
 	return dpio_by_cpu[cpu];
@@ -66,6 +67,10 @@

 static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
 {
+	if (d)
+		return d;
+
+	d = service_select_by_cpu(d, -1);
 	if (d)
 		return d;

@@ -98,13 +103,15 @@
 /**
  * dpaa2_io_create() - create a dpaa2_io object.
  * @desc: the dpaa2_io descriptor
+ * @dev: the actual DPIO device
  *
  * Activates a "struct dpaa2_io" corresponding to the given config of an actual
  * DPIO object.
  *
  * Return a valid dpaa2_io object for success, or NULL for failure.
  */
-struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
+				 struct device *dev)
 {
 	struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

@@ -146,6 +153,8 @@
 	dpio_by_cpu[desc->cpu] = obj;
 	spin_unlock(&dpio_list_lock);

+	obj->dev = dev;
+
 	return obj;
 }

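
On the provider side, the DPIO driver itself calls dpaa2_io_create() and now hands over its own struct device, which the service keeps in obj->dev for the device links created at registration time. A minimal probe-side sketch, assuming the descriptor has already been filled in; the helper name is hypothetical:

#include <linux/device.h>
#include <soc/fsl/dpaa2-io.h>

/* Hypothetical fragment of the dpio probe path: desc was already populated
 * from the DPIO attributes read over the MC bus. */
static struct dpaa2_io *my_activate_dpio(struct dpaa2_io_desc *desc,
					 struct device *dev)
{
	struct dpaa2_io *io;

	io = dpaa2_io_create(desc, dev);	/* dev is kept for device links */
	if (!io)
		dev_err(dev, "dpaa2_io_create() failed\n");

	return io;
}
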
@@ -160,6 +169,11 @@
  */
 void dpaa2_io_down(struct dpaa2_io *d)
 {
+	spin_lock(&dpio_list_lock);
+	dpio_by_cpu[d->dpio_desc.cpu] = NULL;
+	list_del(&d->node);
+	spin_unlock(&dpio_list_lock);
+
 	kfree(d);
 }

@@ -210,10 +224,24 @@
 }

 /**
+ * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
+ *
+ * @d: the given DPIO object.
+ *
+ * Return the cpu associated with the DPIO object
+ */
+int dpaa2_io_get_cpu(struct dpaa2_io *d)
+{
+	return d->dpio_desc.cpu;
+}
+EXPORT_SYMBOL(dpaa2_io_get_cpu);
+
+/**
  * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
  *                               notifications on the given DPIO service.
  * @d: the given DPIO service.
  * @ctx: the notification context.
+ * @dev: the device that requests the register
  *
  * The caller should make the MC command to attach a DPAA2 object to
  * a DPIO after this function completes successfully. In that way:
@@ -228,13 +256,19 @@
  * Return 0 for success, or -ENODEV for failure.
  */
 int dpaa2_io_service_register(struct dpaa2_io *d,
-			      struct dpaa2_io_notification_ctx *ctx)
+			      struct dpaa2_io_notification_ctx *ctx,
+			      struct device *dev)
 {
+	struct device_link *link;
 	unsigned long irqflags;

 	d = service_select_by_cpu(d, ctx->desired_cpu);
 	if (!d)
 		return -ENODEV;
+
+	link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+	if (!link)
+		return -EINVAL;

 	ctx->dpio_id = d->dpio_desc.dpio_id;
 	ctx->qman64 = (u64)(uintptr_t)ctx;
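
For consumers, the extra struct device argument is what makes the device link work: device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER) ties the consumer to the DPIO provider and is dropped automatically when the consumer unbinds. A minimal sketch of registering and deregistering an FQDAN notification context, assuming a consumer device and callback with these hypothetical names:

#include <soc/fsl/dpaa2-io.h>

static void my_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	/* hypothetical: kick NAPI or a workqueue to dequeue from the FQ */
}

/* Register for FQDAN notifications on behalf of consumer_dev. */
static int my_setup_notifications(struct device *consumer_dev,
				  struct dpaa2_io_notification_ctx *ctx,
				  u32 fqid, int cpu)
{
	ctx->is_cdan = 0;		/* frame queue, not channel */
	ctx->id = fqid;
	ctx->desired_cpu = cpu;		/* or -1 for "any cpu" */
	ctx->cb = my_fqdan_cb;

	/* NULL service: let the core pick the DPIO affine to desired_cpu */
	return dpaa2_io_service_register(NULL, ctx, consumer_dev);
}

/* Later, after the MC command detaching the object has been sent: */
static void my_teardown_notifications(struct device *consumer_dev,
				      struct dpaa2_io_notification_ctx *ctx)
{
	dpaa2_io_service_deregister(NULL, ctx, consumer_dev);
}
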
@@ -256,12 +290,14 @@
  * dpaa2_io_service_deregister - The opposite of 'register'.
  * @service: the given DPIO service.
  * @ctx: the notification context.
+ * @dev: the device that requests to be deregistered
  *
  * This function should be called only after sending the MC command to
  * detach the notification-producing device from the DPIO.
  */
 void dpaa2_io_service_deregister(struct dpaa2_io *service,
-				 struct dpaa2_io_notification_ctx *ctx)
+				 struct dpaa2_io_notification_ctx *ctx,
+				 struct device *dev)
 {
 	struct dpaa2_io *d = ctx->dpio_private;
 	unsigned long irqflags;
@@ -272,6 +308,7 @@
 	spin_lock_irqsave(&d->lock_notifications, irqflags);
 	list_del(&ctx->node);
 	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
+
 }
 EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);

@@ -310,6 +347,37 @@
 EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);

 /**
+ * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @s: the dpaa2_io_store object for the result.
+ *
+ * Return 0 for success, or error code for failure.
+ */
+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
+			     struct dpaa2_io_store *s)
+{
+	struct qbman_pull_desc pd;
+	int err;
+
+	qbman_pull_desc_clear(&pd);
+	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
+	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
+	qbman_pull_desc_set_fq(&pd, fqid);
+
+	d = service_select(d);
+	if (!d)
+		return -ENODEV;
+	s->swp = d->swp;
+	err = qbman_swp_pull(d->swp, &pd);
+	if (err)
+		s->swp = NULL;
+
+	return err;
+}
+EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
+
+/**
  * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
  * @d: the given DPIO service.
  * @channelid: the given channel id.
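
dpaa2_io_service_pull_fq() only issues the volatile dequeue command; the responses are written back into the dpaa2_io_store and are consumed with dpaa2_io_store_next(). A minimal, synchronous sketch, assuming the caller can busy-wait on the store; the function name is hypothetical and real users normally poll from NAPI context:

#include <soc/fsl/dpaa2-io.h>

/* Hypothetical: pull up to 16 frames from fqid and count the valid ones. */
static int my_drain_fq(struct device *dev, u32 fqid)
{
	struct dpaa2_io_store *store;
	struct dpaa2_dq *dq;
	int is_last = 0, frames = 0, err;

	store = dpaa2_io_store_create(16, dev);
	if (!store)
		return -ENOMEM;

	err = dpaa2_io_service_pull_fq(NULL, fqid, store);
	if (err)
		goto out;

	while (!is_last) {
		dq = dpaa2_io_store_next(store, &is_last);
		if (!dq)
			continue;	/* response not yet written back */
		/* dpaa2_dq_fd(dq) yields the frame descriptor to process */
		frames++;
	}
	err = frames;
out:
	dpaa2_io_store_destroy(store);
	return err;
}
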
@@ -340,6 +408,105 @@
 	return err;
 }
 EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
+
+/**
+ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
+				u32 fqid,
+				const struct dpaa2_fd *fd)
+{
+	struct qbman_eq_desc ed;
+
+	d = service_select(d);
+	if (!d)
+		return -ENODEV;
+
+	qbman_eq_desc_clear(&ed);
+	qbman_eq_desc_set_no_orp(&ed, 0);
+	qbman_eq_desc_set_fq(&ed, fqid);
+
+	return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
+
+/**
+ * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
+ * to a frame queue using one fqid.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ * @nb: number of frames to be enqueued
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
+					 u32 fqid,
+					 const struct dpaa2_fd *fd,
+					 int nb)
+{
+	struct qbman_eq_desc ed;
+
+	d = service_select(d);
+	if (!d)
+		return -ENODEV;
+
+	qbman_eq_desc_clear(&ed);
+	qbman_eq_desc_set_no_orp(&ed, 0);
+	qbman_eq_desc_set_fq(&ed, fqid);
+
+	return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
+
+/**
+ * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
+ * to different frame queues using a list of fqids.
+ * @d: the given DPIO service.
+ * @fqid: the given list of frame queue ids.
+ * @fd: the frame descriptor which is enqueued.
+ * @nb: number of frames to be enqueued
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
+					      u32 *fqid,
+					      const struct dpaa2_fd *fd,
+					      int nb)
+{
+	struct qbman_eq_desc *ed;
+	int i, ret;
+
+	ed = kcalloc(sizeof(struct qbman_eq_desc), 32, GFP_KERNEL);
+	if (!ed)
+		return -ENOMEM;
+
+	d = service_select(d);
+	if (!d) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	for (i = 0; i < nb; i++) {
+		qbman_eq_desc_clear(&ed[i]);
+		qbman_eq_desc_set_no_orp(&ed[i], 0);
+		qbman_eq_desc_set_fq(&ed[i], fqid[i]);
+	}
+
+	ret = qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
+out:
+	kfree(ed);
+	return ret;
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);

 /**
  * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
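
The enqueue helpers return -EBUSY when the enqueue ring has no room, so callers usually retry a bounded number of times rather than treating it as fatal. A minimal sketch for the single-frame variant, assuming a prepared frame descriptor; the retry bound and function name are hypothetical:

#include <soc/fsl/dpaa2-io.h>

#define MY_ENQUEUE_RETRIES	10	/* hypothetical bound */

/* Hypothetical: enqueue one prepared frame descriptor to fqid, retrying
 * while the enqueue ring is busy. */
static int my_enqueue_fd(u32 fqid, const struct dpaa2_fd *fd)
{
	int i, err;

	for (i = 0; i < MY_ENQUEUE_RETRIES; i++) {
		err = dpaa2_io_service_enqueue_fq(NULL, fqid, fd);
		if (err != -EBUSY)
			return err;	/* 0 on success or a hard error */
	}

	return -EBUSY;
}
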
@@ -380,7 +547,7 @@
  * Return 0 for success, and negative error code for failure.
  */
 int dpaa2_io_service_release(struct dpaa2_io *d,
-			     u32 bpid,
+			     u16 bpid,
 			     const u64 *buffers,
 			     unsigned int num_buffers)
 {
@@ -409,7 +576,7 @@
  * Eg. if the buffer pool is empty, this will return zero.
  */
 int dpaa2_io_service_acquire(struct dpaa2_io *d,
-			     u32 bpid,
+			     u16 bpid,
 			     u64 *buffers,
 			     unsigned int num_buffers)
 {
@@ -435,7 +602,7 @@

 /**
  * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
- * @max_frames: the maximum number of dequeued result for frames, must be <= 16.
+ * @max_frames: the maximum number of dequeued result for frames, must be <= 32.
  * @dev: the device to allow mapping/unmapping the DMAable region.
  *
 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
@@ -450,7 +617,7 @@
 	struct dpaa2_io_store *ret;
 	size_t size;

-	if (!max_frames || (max_frames > 16))
+	if (!max_frames || (max_frames > 32))
 		return NULL;

 	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
@@ -537,9 +704,78 @@
 		if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
 			ret = NULL;
 	} else {
+		prefetch(&s->vaddr[s->idx]);
 		*is_last = 0;
 	}

 	return ret;
 }
 EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
+
+/**
+ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
+ * @d: the given DPIO object.
+ * @fqid: the id of frame queue to be queried.
+ * @fcnt: the queried frame count.
+ * @bcnt: the queried byte count.
+ *
+ * Knowing the FQ count at run-time can be useful in debugging situations.
+ * The instantaneous frame- and byte-count are hereby returned.
+ *
+ * Return 0 for a successful query, and negative error code if query fails.
+ */
+int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
+			    u32 *fcnt, u32 *bcnt)
+{
+	struct qbman_fq_query_np_rslt state;
+	struct qbman_swp *swp;
+	unsigned long irqflags;
+	int ret;
+
+	d = service_select(d);
+	if (!d)
+		return -ENODEV;
+
+	swp = d->swp;
+	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+	ret = qbman_fq_query_state(swp, fqid, &state);
+	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+	if (ret)
+		return ret;
+	*fcnt = qbman_fq_state_frame_count(&state);
+	*bcnt = qbman_fq_state_byte_count(&state);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);
+
+/**
+ * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
+ * buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the index of buffer pool to be queried.
+ * @num: the queried number of buffers in the buffer pool.
+ *
+ * Return 0 for a successful query, and negative error code if query fails.
+ */
+int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
+{
+	struct qbman_bp_query_rslt state;
+	struct qbman_swp *swp;
+	unsigned long irqflags;
+	int ret;
+
+	d = service_select(d);
+	if (!d)
+		return -ENODEV;
+
+	swp = d->swp;
+	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+	ret = qbman_bp_query(swp, bpid, &state);
+	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+	if (ret)
+		return ret;
+	*num = qbman_bp_info_num_free_bufs(&state);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
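
The two query helpers are mainly useful for debugging and instrumentation. A minimal sketch that logs the occupancy of a frame queue and a buffer pool; the ids and helper name are hypothetical:

#include <linux/printk.h>
#include <soc/fsl/dpaa2-io.h>

/* Hypothetical debug helper: dump FQ and buffer-pool occupancy. */
static void my_dump_counts(u32 fqid, u16 bpid)
{
	u32 fcnt = 0, bcnt = 0, nbufs = 0;

	if (!dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt))
		pr_info("fq %u: %u frames, %u bytes\n", fqid, fcnt, bcnt);

	if (!dpaa2_io_query_bp_count(NULL, bpid, &nbufs))
		pr_info("bp %u: %u free buffers\n", bpid, nbufs);
}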