From 072de836f53be56a70cecf70b43ae43b7ce17376 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 10:08:36 +0000
Subject: [PATCH] dmatest: update DMA engine test module
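
Update the DMA engine test module (kernel/drivers/dma/dmatest.c):

- Replace the GPLv2 license boilerplate with an SPDX identifier.
- Turn the "channel" parameter into a kernel_param_ops callback so that
  several channels can be queued one after another and tested together;
  add a read-only "test_list" parameter that prints the configured
  threads per channel, and record configuration failures in last_error.
- Add "alignment", "transfer_size" and "polled" module parameters.
  Polled mode drops DMA_PREP_INTERRUPT and waits for completion with
  dma_sync_wait() instead of the interrupt callback.
- Fold the per-thread source/destination buffer arrays into struct
  dmatest_data and move their allocation into dmatest_alloc_test_data().
- Report iops with two fractional digits using 8-bit fixed-point
  arithmetic, and shut channels down with dmaengine_terminate_sync().

With this change a test run is configured by writing channel names to
/sys/module/dmatest/parameters/channel and started by writing 1 to the
"run" parameter; "test_list" can be read back to inspect the queued
threads.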

---
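Note: the fixed-point iops arithmetic introduced below can be checked
outside the kernel with a small host-side C sketch. The macros are the
ones this patch adds to dmatest.c; the test count and runtime are
arbitrary values chosen only for illustration.

  #include <stdio.h>

  /* Fixed-point helpers added by the patch (8 fractional bits). */
  #define FIXPT_SHIFT        8
  #define FIXPNT_MASK        0xFF
  #define FIXPT_TO_INT(a)    ((a) >> FIXPT_SHIFT)
  #define INT_TO_FIXPT(a)    ((a) << FIXPT_SHIFT)
  #define FIXPT_GET_FRAC(a)  ((((a) & FIXPNT_MASK) * 100) >> FIXPT_SHIFT)

  int main(void)
  {
      /* Example only: 1234 completed tests over 700000 us of runtime. */
      unsigned long long tests = 1234;
      unsigned long long runtime_us = 700000;
      unsigned long long per_sec = 1000000;  /* scale per-us to per-second */

      /* Mirrors the arithmetic in dmatest_persec(): scale the count,
       * widen to fixed point, then divide by the runtime. */
      per_sec *= tests;
      per_sec = INT_TO_FIXPT(per_sec);
      per_sec /= runtime_us;

      /* Same format as the new summary line: integer part + two digits. */
      printf("%llu.%02llu iops\n",
             FIXPT_TO_INT(per_sec), FIXPT_GET_FRAC(per_sec));
      return 0;
  }
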
 kernel/drivers/dma/dmatest.c |  647 +++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 451 insertions(+), 196 deletions(-)

diff --git a/kernel/drivers/dma/dmatest.c b/kernel/drivers/dma/dmatest.c
index e38a653..a3a1721 100644
--- a/kernel/drivers/dma/dmatest.c
+++ b/kernel/drivers/dma/dmatest.c
@@ -1,15 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * DMA Engine test module
  *
  * Copyright (C) 2007 Atmel Corporation
  * Copyright (C) 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
@@ -26,11 +24,6 @@
 static unsigned int test_buf_size = 16384;
 module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
-
-static char test_channel[20];
-module_param_string(channel, test_channel, sizeof(test_channel),
-		S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
 
 static char test_device[32];
 module_param_string(device, test_device, sizeof(test_device),
@@ -68,7 +61,7 @@
 		"Number of p+q source buffers (default: 3)");
 
 static int timeout = 3000;
-module_param(timeout, uint, S_IRUGO | S_IWUSR);
+module_param(timeout, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
 		 "Pass -1 for infinite timeout");
 
@@ -84,6 +77,18 @@
 module_param(verbose, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
 
+static int alignment = -1;
+module_param(alignment, int, 0644);
+MODULE_PARM_DESC(alignment, "Custom data address alignment taken as 2^(alignment) (default: not used (-1))");
+
+static unsigned int transfer_size;
+module_param(transfer_size, uint, 0644);
+MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");
+
+static bool polled;
+module_param(polled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
+
 /**
  * struct dmatest_params - test parameters.
  * @buf_size:		size of the memcpy test buffer
@@ -95,6 +100,11 @@
  * @xor_sources:	number of xor source buffers
  * @pq_sources:		number of p+q source buffers
  * @timeout:		transfer timeout in msec, -1 for infinite timeout
+ * @noverify:		disable data verification
+ * @norandom:		disable random offset setup
+ * @alignment:		custom data address alignment taken as 2^alignment
+ * @transfer_size:	custom transfer size in bytes
+ * @polled:		use polling for completion instead of interrupts
  */
 struct dmatest_params {
 	unsigned int	buf_size;
@@ -108,12 +118,19 @@
 	int		timeout;
 	bool		noverify;
 	bool		norandom;
+	int		alignment;
+	unsigned int	transfer_size;
+	bool		polled;
 };
 
 /**
  * struct dmatest_info - test information.
  * @params:		test parameters
+ * @channels:		channels under test
+ * @nr_channels:	number of channels under test
  * @lock:		access protection to the fields of this structure
+ * @did_init:		module has been initialized completely
+ * @last_error:		test has faced configuration issues
  */
 static struct dmatest_info {
 	/* Test parameters */
@@ -122,6 +139,7 @@
 	/* Internal state */
 	struct list_head	channels;
 	unsigned int		nr_channels;
+	int			last_error;
 	struct mutex		lock;
 	bool			did_init;
 } test_info = {
@@ -138,6 +156,28 @@
 static bool dmatest_run;
 module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(run, "Run the test (default: false)");
+
+static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
+static int dmatest_chan_get(char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops multi_chan_ops = {
+	.set = dmatest_chan_set,
+	.get = dmatest_chan_get,
+};
+
+static char test_channel[20];
+static struct kparam_string newchan_kps = {
+	.string = test_channel,
+	.maxlen = 20,
+};
+module_param_cb(channel, &multi_chan_ops, &newchan_kps, 0644);
+MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
+
+static int dmatest_test_list_get(char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops test_list_ops = {
+	.get = dmatest_test_list_get,
+};
+module_param_cb(test_list, &test_list_ops, NULL, 0444);
+MODULE_PARM_DESC(test_list, "Print current test list");
 
 /* Maximum amount of mismatched bytes in buffer to print */
 #define MAX_ERROR_COUNT		32
@@ -160,10 +200,24 @@
 #define PATTERN_COUNT_MASK	0x1f
 #define PATTERN_MEMSET_IDX	0x01
 
+/* Fixed point arithmetic ops */
+#define FIXPT_SHIFT		8
+#define FIXPNT_MASK		0xFF
+#define FIXPT_TO_INT(a)	((a) >> FIXPT_SHIFT)
+#define INT_TO_FIXPT(a)	((a) << FIXPT_SHIFT)
+#define FIXPT_GET_FRAC(a)	((((a) & FIXPNT_MASK) * 100) >> FIXPT_SHIFT)
+
 /* poor man's completion - we want to use wait_event_freezable() on it */
 struct dmatest_done {
 	bool			done;
 	wait_queue_head_t	*wait;
+};
+
+struct dmatest_data {
+	u8		**raw;
+	u8		**aligned;
+	unsigned int	cnt;
+	unsigned int	off;
 };
 
 struct dmatest_thread {
@@ -171,14 +225,13 @@
 	struct dmatest_info	*info;
 	struct task_struct	*task;
 	struct dma_chan		*chan;
-	u8			**srcs;
-	u8			**usrcs;
-	u8			**dsts;
-	u8			**udsts;
+	struct dmatest_data	src;
+	struct dmatest_data	dst;
 	enum dma_transaction_type type;
 	wait_queue_head_t done_wait;
 	struct dmatest_done test_done;
 	bool			done;
+	bool			pending;
 };
 
 struct dmatest_chan {
@@ -198,7 +251,23 @@
 		struct dmatest_thread *thread;
 
 		list_for_each_entry(thread, &dtc->threads, node) {
-			if (!thread->done)
+			if (!thread->done && !thread->pending)
+				return true;
+		}
+	}
+
+	return false;
+}
+
+static bool is_threaded_test_pending(struct dmatest_info *info)
+{
+	struct dmatest_chan *dtc;
+
+	list_for_each_entry(dtc, &info->channels, node) {
+		struct dmatest_thread *thread;
+
+		list_for_each_entry(thread, &dtc->threads, node) {
+			if (thread->pending)
 				return true;
 		}
 	}
@@ -386,8 +455,13 @@
 static void result(const char *err, unsigned int n, unsigned int src_off,
 		   unsigned int dst_off, unsigned int len, unsigned long data)
 {
-	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
-		current->comm, n, err, src_off, dst_off, len, data);
+	if (IS_ERR_VALUE(data)) {
+		pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%ld)\n",
+			current->comm, n, err, src_off, dst_off, len, data);
+	} else {
+		pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
+			current->comm, n, err, src_off, dst_off, len, data);
+	}
 }
 
 static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
@@ -419,13 +493,62 @@
 	}
 
 	per_sec *= val;
+	per_sec = INT_TO_FIXPT(per_sec);
 	do_div(per_sec, runtime);
+
 	return per_sec;
 }
 
 static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
 {
-	return dmatest_persec(runtime, len >> 10);
+	return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
+}
+
+static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt)
+{
+	unsigned int i;
+
+	for (i = 0; i < cnt; i++)
+		kfree(d->raw[i]);
+
+	kfree(d->aligned);
+	kfree(d->raw);
+}
+
+static void dmatest_free_test_data(struct dmatest_data *d)
+{
+	__dmatest_free_test_data(d, d->cnt);
+}
+
+static int dmatest_alloc_test_data(struct dmatest_data *d,
+		unsigned int buf_size, u8 align)
+{
+	unsigned int i = 0;
+
+	d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
+	if (!d->raw)
+		return -ENOMEM;
+
+	d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
+	if (!d->aligned)
+		goto err;
+
+	for (i = 0; i < d->cnt; i++) {
+		d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
+		if (!d->raw[i])
+			goto err;
+
+		/* align to alignment restriction */
+		if (align)
+			d->aligned[i] = PTR_ALIGN(d->raw[i], align);
+		else
+			d->aligned[i] = d->raw[i];
+	}
+
+	return 0;
+err:
+	__dmatest_free_test_data(d, i);
+	return -ENOMEM;
 }
 
 /*
@@ -458,14 +581,16 @@
 	enum dma_ctrl_flags 	flags;
 	u8			*pq_coefs = NULL;
 	int			ret;
-	int			src_cnt;
-	int			dst_cnt;
+	unsigned int 		buf_size;
+	struct dmatest_data	*src;
+	struct dmatest_data	*dst;
 	int			i;
 	ktime_t			ktime, start, diff;
 	ktime_t			filltime = 0;
 	ktime_t			comparetime = 0;
 	s64			runtime = 0;
 	unsigned long long	total_len = 0;
+	unsigned long long	iops = 0;
 	u8			align = 0;
 	bool			is_memset = false;
 	dma_addr_t		*srcs;
@@ -476,95 +601,81 @@
 	ret = -ENOMEM;
 
 	smp_rmb();
+	thread->pending = false;
 	info = thread->info;
 	params = &info->params;
 	chan = thread->chan;
 	dev = chan->device;
+	src = &thread->src;
+	dst = &thread->dst;
 	if (thread->type == DMA_MEMCPY) {
-		align = dev->copy_align;
-		src_cnt = dst_cnt = 1;
+		align = params->alignment < 0 ? dev->copy_align :
+						params->alignment;
+		src->cnt = dst->cnt = 1;
 	} else if (thread->type == DMA_MEMSET) {
-		align = dev->fill_align;
-		src_cnt = dst_cnt = 1;
+		align = params->alignment < 0 ? dev->fill_align :
+						params->alignment;
+		src->cnt = dst->cnt = 1;
 		is_memset = true;
 	} else if (thread->type == DMA_XOR) {
 		/* force odd to ensure dst = src */
-		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
-		dst_cnt = 1;
-		align = dev->xor_align;
+		src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
+		dst->cnt = 1;
+		align = params->alignment < 0 ? dev->xor_align :
+						params->alignment;
 	} else if (thread->type == DMA_PQ) {
 		/* force odd to ensure dst = src */
-		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
-		dst_cnt = 2;
-		align = dev->pq_align;
+		src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
+		dst->cnt = 2;
+		align = params->alignment < 0 ? dev->pq_align :
+						params->alignment;
 
 		pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
 		if (!pq_coefs)
 			goto err_thread_type;
 
-		for (i = 0; i < src_cnt; i++)
+		for (i = 0; i < src->cnt; i++)
 			pq_coefs[i] = 1;
 	} else
 		goto err_thread_type;
 
-	thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
-	if (!thread->srcs)
-		goto err_srcs;
-
-	thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
-	if (!thread->usrcs)
-		goto err_usrcs;
-
-	for (i = 0; i < src_cnt; i++) {
-		thread->usrcs[i] = kmalloc(params->buf_size + align,
-					   GFP_KERNEL);
-		if (!thread->usrcs[i])
-			goto err_srcbuf;
-
-		/* align srcs to alignment restriction */
-		if (align)
-			thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
-		else
-			thread->srcs[i] = thread->usrcs[i];
+	/* Check if buffer count fits into map count variable (u8) */
+	if ((src->cnt + dst->cnt) >= 255) {
+		pr_err("too many buffers (%d of 255 supported)\n",
+		       src->cnt + dst->cnt);
+		goto err_free_coefs;
 	}
-	thread->srcs[i] = NULL;
 
-	thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
-	if (!thread->dsts)
-		goto err_dsts;
-
-	thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
-	if (!thread->udsts)
-		goto err_udsts;
-
-	for (i = 0; i < dst_cnt; i++) {
-		thread->udsts[i] = kmalloc(params->buf_size + align,
-					   GFP_KERNEL);
-		if (!thread->udsts[i])
-			goto err_dstbuf;
-
-		/* align dsts to alignment restriction */
-		if (align)
-			thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
-		else
-			thread->dsts[i] = thread->udsts[i];
+	buf_size = params->buf_size;
+	if (1 << align > buf_size) {
+		pr_err("%u-byte buffer too small for %d-byte alignment\n",
+		       buf_size, 1 << align);
+		goto err_free_coefs;
 	}
-	thread->dsts[i] = NULL;
+
+	if (dmatest_alloc_test_data(src, buf_size, align) < 0)
+		goto err_free_coefs;
+
+	if (dmatest_alloc_test_data(dst, buf_size, align) < 0)
+		goto err_src;
 
 	set_user_nice(current, 10);
 
-	srcs = kcalloc(src_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+	srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL);
 	if (!srcs)
-		goto err_dstbuf;
+		goto err_dst;
 
-	dma_pq = kcalloc(dst_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+	dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL);
 	if (!dma_pq)
 		goto err_srcs_array;
 
 	/*
 	 * src and dst buffers are freed by ourselves below
 	 */
-	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	if (params->polled)
+		flags = DMA_CTRL_ACK;
+	else
+		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
 	ktime = ktime_get();
 	while (!(kthread_should_stop() ||
@@ -572,86 +683,83 @@
 		struct dma_async_tx_descriptor *tx = NULL;
 		struct dmaengine_unmap_data *um;
 		dma_addr_t *dsts;
-		unsigned int src_off, dst_off, len;
+		unsigned int len;
 
 		total_tests++;
 
-		/* Check if buffer count fits into map count variable (u8) */
-		if ((src_cnt + dst_cnt) >= 255) {
-			pr_err("too many buffers (%d of 255 supported)\n",
-			       src_cnt + dst_cnt);
-			break;
+		if (params->transfer_size) {
+			if (params->transfer_size >= buf_size) {
+				pr_err("%u-byte transfer size must be lower than %u-buffer size\n",
+				       params->transfer_size, buf_size);
+				break;
+			}
+			len = params->transfer_size;
+		} else if (params->norandom) {
+			len = buf_size;
+		} else {
+			len = dmatest_random() % buf_size + 1;
 		}
 
-		if (1 << align > params->buf_size) {
-			pr_err("%u-byte buffer too small for %d-byte alignment\n",
-			       params->buf_size, 1 << align);
-			break;
+		/* Do not alter transfer size explicitly defined by user */
+		if (!params->transfer_size) {
+			len = (len >> align) << align;
+			if (!len)
+				len = 1 << align;
 		}
-
-		if (params->norandom)
-			len = params->buf_size;
-		else
-			len = dmatest_random() % params->buf_size + 1;
-
-		len = (len >> align) << align;
-		if (!len)
-			len = 1 << align;
-
 		total_len += len;
 
 		if (params->norandom) {
-			src_off = 0;
-			dst_off = 0;
+			src->off = 0;
+			dst->off = 0;
 		} else {
-			src_off = dmatest_random() % (params->buf_size - len + 1);
-			dst_off = dmatest_random() % (params->buf_size - len + 1);
+			src->off = dmatest_random() % (buf_size - len + 1);
+			dst->off = dmatest_random() % (buf_size - len + 1);
 
-			src_off = (src_off >> align) << align;
-			dst_off = (dst_off >> align) << align;
+			src->off = (src->off >> align) << align;
+			dst->off = (dst->off >> align) << align;
 		}
 
 		if (!params->noverify) {
 			start = ktime_get();
-			dmatest_init_srcs(thread->srcs, src_off, len,
-					  params->buf_size, is_memset);
-			dmatest_init_dsts(thread->dsts, dst_off, len,
-					  params->buf_size, is_memset);
+			dmatest_init_srcs(src->aligned, src->off, len,
+					  buf_size, is_memset);
+			dmatest_init_dsts(dst->aligned, dst->off, len,
+					  buf_size, is_memset);
 
 			diff = ktime_sub(ktime_get(), start);
 			filltime = ktime_add(filltime, diff);
 		}
 
-		um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
+		um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt,
 					      GFP_KERNEL);
 		if (!um) {
 			failed_tests++;
 			result("unmap data NULL", total_tests,
-			       src_off, dst_off, len, ret);
+			       src->off, dst->off, len, ret);
 			continue;
 		}
 
-		um->len = params->buf_size;
-		for (i = 0; i < src_cnt; i++) {
-			void *buf = thread->srcs[i];
+		um->len = buf_size;
+		for (i = 0; i < src->cnt; i++) {
+			void *buf = src->aligned[i];
 			struct page *pg = virt_to_page(buf);
 			unsigned long pg_off = offset_in_page(buf);
 
 			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
 						   um->len, DMA_TO_DEVICE);
-			srcs[i] = um->addr[i] + src_off;
+			srcs[i] = um->addr[i] + src->off;
 			ret = dma_mapping_error(dev->dev, um->addr[i]);
 			if (ret) {
 				result("src mapping error", total_tests,
-				       src_off, dst_off, len, ret);
+				       src->off, dst->off, len, ret);
 				goto error_unmap_continue;
 			}
 			um->to_cnt++;
 		}
 		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
-		dsts = &um->addr[src_cnt];
-		for (i = 0; i < dst_cnt; i++) {
-			void *buf = thread->dsts[i];
+		dsts = &um->addr[src->cnt];
+		for (i = 0; i < dst->cnt; i++) {
+			void *buf = dst->aligned[i];
 			struct page *pg = virt_to_page(buf);
 			unsigned long pg_off = offset_in_page(buf);
 
@@ -660,7 +768,7 @@
 			ret = dma_mapping_error(dev->dev, dsts[i]);
 			if (ret) {
 				result("dst mapping error", total_tests,
-				       src_off, dst_off, len, ret);
+				       src->off, dst->off, len, ret);
 				goto error_unmap_continue;
 			}
 			um->bidi_cnt++;
@@ -668,106 +776,119 @@
 
 		if (thread->type == DMA_MEMCPY)
 			tx = dev->device_prep_dma_memcpy(chan,
-							 dsts[0] + dst_off,
+							 dsts[0] + dst->off,
 							 srcs[0], len, flags);
 		else if (thread->type == DMA_MEMSET)
 			tx = dev->device_prep_dma_memset(chan,
-						dsts[0] + dst_off,
-						*(thread->srcs[0] + src_off),
+						dsts[0] + dst->off,
+						*(src->aligned[0] + src->off),
 						len, flags);
 		else if (thread->type == DMA_XOR)
 			tx = dev->device_prep_dma_xor(chan,
-						      dsts[0] + dst_off,
-						      srcs, src_cnt,
+						      dsts[0] + dst->off,
+						      srcs, src->cnt,
 						      len, flags);
 		else if (thread->type == DMA_PQ) {
-			for (i = 0; i < dst_cnt; i++)
-				dma_pq[i] = dsts[i] + dst_off;
+			for (i = 0; i < dst->cnt; i++)
+				dma_pq[i] = dsts[i] + dst->off;
 			tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
-						     src_cnt, pq_coefs,
+						     src->cnt, pq_coefs,
 						     len, flags);
 		}
 
 		if (!tx) {
-			result("prep error", total_tests, src_off,
-			       dst_off, len, ret);
+			result("prep error", total_tests, src->off,
+			       dst->off, len, ret);
 			msleep(100);
 			goto error_unmap_continue;
 		}
 
 		done->done = false;
-		tx->callback = dmatest_callback;
-		tx->callback_param = done;
+		if (!params->polled) {
+			tx->callback = dmatest_callback;
+			tx->callback_param = done;
+		}
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
-			result("submit error", total_tests, src_off,
-			       dst_off, len, ret);
+			result("submit error", total_tests, src->off,
+			       dst->off, len, ret);
 			msleep(100);
 			goto error_unmap_continue;
 		}
-		dma_async_issue_pending(chan);
 
-		wait_event_freezable_timeout(thread->done_wait, done->done,
-					     msecs_to_jiffies(params->timeout));
+		if (params->polled) {
+			status = dma_sync_wait(chan, cookie);
+			dmaengine_terminate_sync(chan);
+			if (status == DMA_COMPLETE)
+				done->done = true;
+		} else {
+			dma_async_issue_pending(chan);
 
-		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+			wait_event_freezable_timeout(thread->done_wait,
+					done->done,
+					msecs_to_jiffies(params->timeout));
+
+			status = dma_async_is_tx_complete(chan, cookie, NULL,
+							  NULL);
+		}
 
 		if (!done->done) {
-			dmaengine_unmap_put(um);
-			result("test timed out", total_tests, src_off, dst_off,
+			result("test timed out", total_tests, src->off, dst->off,
 			       len, 0);
 			goto error_unmap_continue;
-		} else if (status != DMA_COMPLETE) {
-			dmaengine_unmap_put(um);
+		} else if (status != DMA_COMPLETE &&
+			   !(dma_has_cap(DMA_COMPLETION_NO_ORDER,
+					 dev->cap_mask) &&
+			     status == DMA_OUT_OF_ORDER)) {
 			result(status == DMA_ERROR ?
 			       "completion error status" :
-			       "completion busy status", total_tests, src_off,
-			       dst_off, len, ret);
+			       "completion busy status", total_tests, src->off,
+			       dst->off, len, ret);
 			goto error_unmap_continue;
 		}
 
 		dmaengine_unmap_put(um);
 
 		if (params->noverify) {
-			verbose_result("test passed", total_tests, src_off,
-				       dst_off, len, 0);
+			verbose_result("test passed", total_tests, src->off,
+				       dst->off, len, 0);
 			continue;
 		}
 
 		start = ktime_get();
 		pr_debug("%s: verifying source buffer...\n", current->comm);
-		error_count = dmatest_verify(thread->srcs, 0, src_off,
+		error_count = dmatest_verify(src->aligned, 0, src->off,
 				0, PATTERN_SRC, true, is_memset);
-		error_count += dmatest_verify(thread->srcs, src_off,
-				src_off + len, src_off,
+		error_count += dmatest_verify(src->aligned, src->off,
+				src->off + len, src->off,
 				PATTERN_SRC | PATTERN_COPY, true, is_memset);
-		error_count += dmatest_verify(thread->srcs, src_off + len,
-				params->buf_size, src_off + len,
+		error_count += dmatest_verify(src->aligned, src->off + len,
+				buf_size, src->off + len,
 				PATTERN_SRC, true, is_memset);
 
 		pr_debug("%s: verifying dest buffer...\n", current->comm);
-		error_count += dmatest_verify(thread->dsts, 0, dst_off,
+		error_count += dmatest_verify(dst->aligned, 0, dst->off,
 				0, PATTERN_DST, false, is_memset);
 
-		error_count += dmatest_verify(thread->dsts, dst_off,
-				dst_off + len, src_off,
+		error_count += dmatest_verify(dst->aligned, dst->off,
+				dst->off + len, src->off,
 				PATTERN_SRC | PATTERN_COPY, false, is_memset);
 
-		error_count += dmatest_verify(thread->dsts, dst_off + len,
-				params->buf_size, dst_off + len,
+		error_count += dmatest_verify(dst->aligned, dst->off + len,
+				buf_size, dst->off + len,
 				PATTERN_DST, false, is_memset);
 
 		diff = ktime_sub(ktime_get(), start);
 		comparetime = ktime_add(comparetime, diff);
 
 		if (error_count) {
-			result("data error", total_tests, src_off, dst_off,
+			result("data error", total_tests, src->off, dst->off,
 			       len, error_count);
 			failed_tests++;
 		} else {
-			verbose_result("test passed", total_tests, src_off,
-				       dst_off, len, 0);
+			verbose_result("test passed", total_tests, src->off,
+				       dst->off, len, 0);
 		}
 
 		continue;
@@ -785,30 +906,22 @@
 	kfree(dma_pq);
 err_srcs_array:
 	kfree(srcs);
-err_dstbuf:
-	for (i = 0; thread->udsts[i]; i++)
-		kfree(thread->udsts[i]);
-	kfree(thread->udsts);
-err_udsts:
-	kfree(thread->dsts);
-err_dsts:
-err_srcbuf:
-	for (i = 0; thread->usrcs[i]; i++)
-		kfree(thread->usrcs[i]);
-	kfree(thread->usrcs);
-err_usrcs:
-	kfree(thread->srcs);
-err_srcs:
+err_dst:
+	dmatest_free_test_data(dst);
+err_src:
+	dmatest_free_test_data(src);
+err_free_coefs:
 	kfree(pq_coefs);
 err_thread_type:
-	pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n",
+	iops = dmatest_persec(runtime, total_tests);
+	pr_info("%s: summary %u tests, %u failures %llu.%02llu iops %llu KB/s (%d)\n",
 		current->comm, total_tests, failed_tests,
-		dmatest_persec(runtime, total_tests),
+		FIXPT_TO_INT(iops), FIXPT_GET_FRAC(iops),
 		dmatest_KBs(runtime, total_len), ret);
 
 	/* terminate all transfers on specified channels */
 	if (ret || failed_tests)
-		dmaengine_terminate_all(chan);
+		dmaengine_terminate_sync(chan);
 
 	thread->done = true;
 	wake_up(&thread_wait);
@@ -832,7 +945,7 @@
 	}
 
 	/* terminate all transfers on specified channels */
-	dmaengine_terminate_all(dtc->chan);
+	dmaengine_terminate_sync(dtc->chan);
 
 	kfree(dtc);
 }
@@ -882,7 +995,7 @@
 		/* srcbuf and dstbuf are allocated by the thread itself */
 		get_task_struct(thread->task);
 		list_add_tail(&thread->node, &dtc->threads);
-		wake_up_process(thread->task);
+		thread->pending = true;
 	}
 
 	return i;
@@ -904,6 +1017,12 @@
 
 	dtc->chan = chan;
 	INIT_LIST_HEAD(&dtc->threads);
+
+	if (dma_has_cap(DMA_COMPLETION_NO_ORDER, dma_dev->cap_mask) &&
+	    info->params.polled) {
+		info->params.polled = false;
+		pr_warn("DMA_COMPLETION_NO_ORDER, polled disabled\n");
+	}
 
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
 		if (dmatest == 0) {
@@ -928,7 +1047,7 @@
 		thread_count += cnt > 0 ? cnt : 0;
 	}
 
-	pr_info("Started %u threads using %s\n",
+	pr_info("Added %u threads using %s\n",
 		thread_count, dma_chan_name(chan));
 
 	list_add_tail(&dtc->node, &info->channels);
@@ -939,13 +1058,7 @@
 
 static bool filter(struct dma_chan *chan, void *param)
 {
-	struct dmatest_params *params = param;
-
-	if (!dmatest_match_channel(params, chan) ||
-	    !dmatest_match_device(params, chan->device))
-		return false;
-	else
-		return true;
+	return dmatest_match_channel(param, chan) && dmatest_match_device(param, chan->device);
 }
 
 static void request_channels(struct dmatest_info *info,
@@ -973,7 +1086,7 @@
 	}
 }
 
-static void run_threaded_test(struct dmatest_info *info)
+static void add_threaded_test(struct dmatest_info *info)
 {
 	struct dmatest_params *params = &info->params;
 
@@ -989,11 +1102,32 @@
 	params->timeout = timeout;
 	params->noverify = noverify;
 	params->norandom = norandom;
+	params->alignment = alignment;
+	params->transfer_size = transfer_size;
+	params->polled = polled;
 
 	request_channels(info, DMA_MEMCPY);
 	request_channels(info, DMA_MEMSET);
 	request_channels(info, DMA_XOR);
 	request_channels(info, DMA_PQ);
+}
+
+static void run_pending_tests(struct dmatest_info *info)
+{
+	struct dmatest_chan *dtc;
+	unsigned int thread_count = 0;
+
+	list_for_each_entry(dtc, &info->channels, node) {
+		struct dmatest_thread *thread;
+
+		thread_count = 0;
+		list_for_each_entry(thread, &dtc->threads, node) {
+			wake_up_process(thread->task);
+			thread_count++;
+		}
+		pr_info("Started %u threads using %s\n",
+			thread_count, dma_chan_name(dtc->chan));
+	}
 }
 
 static void stop_threaded_test(struct dmatest_info *info)
@@ -1012,7 +1146,7 @@
 	info->nr_channels = 0;
 }
 
-static void restart_threaded_test(struct dmatest_info *info, bool run)
+static void start_threaded_tests(struct dmatest_info *info)
 {
 	/* we might be called early to set run=, defer running until all
 	 * parameters have been evaluated
@@ -1020,11 +1154,7 @@
 	if (!info->did_init)
 		return;
 
-	/* Stop any running test first */
-	stop_threaded_test(info);
-
-	/* Run test with new parameters */
-	run_threaded_test(info);
+	run_pending_tests(info);
 }
 
 static int dmatest_run_get(char *val, const struct kernel_param *kp)
@@ -1035,7 +1165,8 @@
 	if (is_threaded_test_run(info)) {
 		dmatest_run = true;
 	} else {
-		stop_threaded_test(info);
+		if (!is_threaded_test_pending(info))
+			stop_threaded_test(info);
 		dmatest_run = false;
 	}
 	mutex_unlock(&info->lock);
@@ -1053,16 +1184,139 @@
 	if (ret) {
 		mutex_unlock(&info->lock);
 		return ret;
+	} else if (dmatest_run) {
+		if (!is_threaded_test_pending(info)) {
+			/*
+			 * We have nothing to run. This can be due to:
+			 */
+			ret = info->last_error;
+			if (ret) {
+				/* 1) Misconfiguration */
+				pr_err("Channel misconfigured, can't continue\n");
+				mutex_unlock(&info->lock);
+				return ret;
+			} else {
+				/* 2) We rely on defaults */
+				pr_info("No channels configured, continue with any\n");
+				if (!is_threaded_test_run(info))
+					stop_threaded_test(info);
+				add_threaded_test(info);
+			}
+		}
+		start_threaded_tests(info);
+	} else {
+		stop_threaded_test(info);
 	}
-
-	if (is_threaded_test_run(info))
-		ret = -EBUSY;
-	else if (dmatest_run)
-		restart_threaded_test(info, dmatest_run);
 
 	mutex_unlock(&info->lock);
 
 	return ret;
+}
+
+static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
+{
+	struct dmatest_info *info = &test_info;
+	struct dmatest_chan *dtc;
+	char chan_reset_val[20];
+	int ret;
+
+	mutex_lock(&info->lock);
+	ret = param_set_copystring(val, kp);
+	if (ret) {
+		mutex_unlock(&info->lock);
+		return ret;
+	}
+	/* Clear any previously run threads */
+	if (!is_threaded_test_run(info) && !is_threaded_test_pending(info))
+		stop_threaded_test(info);
+	/* Reject channels that are already registered */
+	if (is_threaded_test_pending(info)) {
+		list_for_each_entry(dtc, &info->channels, node) {
+			if (strcmp(dma_chan_name(dtc->chan),
+				   strim(test_channel)) == 0) {
+				dtc = list_last_entry(&info->channels,
+						      struct dmatest_chan,
+						      node);
+				strlcpy(chan_reset_val,
+					dma_chan_name(dtc->chan),
+					sizeof(chan_reset_val));
+				ret = -EBUSY;
+				goto add_chan_err;
+			}
+		}
+	}
+
+	add_threaded_test(info);
+
+	/* Check if channel was added successfully */
+	if (!list_empty(&info->channels)) {
+		/*
+		 * if new channel was not successfully added, revert the
+		 * "test_channel" string to the name of the last successfully
+		 * added channel, except when the user issues an empty string
+		 * to the channel parameter.
+		 */
+		dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
+		if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
+		    && (strcmp("", strim(test_channel)) != 0)) {
+			ret = -EINVAL;
+			strlcpy(chan_reset_val, dma_chan_name(dtc->chan),
+				sizeof(chan_reset_val));
+			goto add_chan_err;
+		}
+
+	} else {
+		/* Clear test_channel if no channels were added successfully */
+		strlcpy(chan_reset_val, "", sizeof(chan_reset_val));
+		ret = -EBUSY;
+		goto add_chan_err;
+	}
+
+	info->last_error = ret;
+	mutex_unlock(&info->lock);
+
+	return ret;
+
+add_chan_err:
+	param_set_copystring(chan_reset_val, kp);
+	info->last_error = ret;
+	mutex_unlock(&info->lock);
+
+	return ret;
+}
+
+static int dmatest_chan_get(char *val, const struct kernel_param *kp)
+{
+	struct dmatest_info *info = &test_info;
+
+	mutex_lock(&info->lock);
+	if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) {
+		stop_threaded_test(info);
+		strlcpy(test_channel, "", sizeof(test_channel));
+	}
+	mutex_unlock(&info->lock);
+
+	return param_get_string(val, kp);
+}
+
+static int dmatest_test_list_get(char *val, const struct kernel_param *kp)
+{
+	struct dmatest_info *info = &test_info;
+	struct dmatest_chan *dtc;
+	unsigned int thread_count = 0;
+
+	list_for_each_entry(dtc, &info->channels, node) {
+		struct dmatest_thread *thread;
+
+		thread_count = 0;
+		list_for_each_entry(thread, &dtc->threads, node) {
+			thread_count++;
+		}
+		pr_info("%u threads using %s\n",
+			thread_count, dma_chan_name(dtc->chan));
+	}
+
+	return 0;
 }
 
 static int __init dmatest_init(void)
@@ -1072,7 +1326,8 @@
 
 	if (dmatest_run) {
 		mutex_lock(&info->lock);
-		run_threaded_test(info);
+		add_threaded_test(info);
+		run_pending_tests(info);
 		mutex_unlock(&info->lock);
 	}
 

--
Gitblit v1.6.2