From b22da3d8526a935aa31e086e63f60ff3246cb61c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 07:24:11 +0000
Subject: [PATCH] add stmmac read mac from eeprom
---
kernel/sound/core/pcm_native.c | 1075 ++++++++++++++++++++++++++++++++++++++++-------------------
1 file changed, 733 insertions(+), 342 deletions(-)
diff --git a/kernel/sound/core/pcm_native.c b/kernel/sound/core/pcm_native.c
index 34ba470..6cc7c2a 100644
--- a/kernel/sound/core/pcm_native.c
+++ b/kernel/sound/core/pcm_native.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Digital Audio (PCM) abstract layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
+#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/file.h>
@@ -28,6 +14,7 @@
#include <linux/pm_qos.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
@@ -85,71 +72,30 @@
*
*/
-static DEFINE_RWLOCK(snd_pcm_link_rwlock);
static DECLARE_RWSEM(snd_pcm_link_rwsem);
-/* Writer in rwsem may block readers even during its waiting in queue,
- * and this may lead to a deadlock when the code path takes read sem
- * twice (e.g. one in snd_pcm_action_nonatomic() and another in
- * snd_pcm_stream_lock()). As a (suboptimal) workaround, let writer to
- * sleep until all the readers are completed without blocking by writer.
- */
-static inline void down_write_nonfifo(struct rw_semaphore *lock)
+void snd_pcm_group_init(struct snd_pcm_group *group)
{
- while (!down_write_trylock(lock))
- msleep(1);
+ spin_lock_init(&group->lock);
+ mutex_init(&group->mutex);
+ INIT_LIST_HEAD(&group->substreams);
+ refcount_set(&group->refs, 1);
}
-#define PCM_LOCK_DEFAULT 0
-#define PCM_LOCK_IRQ 1
-#define PCM_LOCK_IRQSAVE 2
-
-static unsigned long __snd_pcm_stream_lock_mode(struct snd_pcm_substream *substream,
- unsigned int mode)
-{
- unsigned long flags = 0;
- if (substream->pcm->nonatomic) {
- down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
- mutex_lock(&substream->self_group.mutex);
- } else {
- switch (mode) {
- case PCM_LOCK_DEFAULT:
- read_lock(&snd_pcm_link_rwlock);
- break;
- case PCM_LOCK_IRQ:
- read_lock_irq(&snd_pcm_link_rwlock);
- break;
- case PCM_LOCK_IRQSAVE:
- read_lock_irqsave(&snd_pcm_link_rwlock, flags);
- break;
- }
- spin_lock(&substream->self_group.lock);
- }
- return flags;
+/* define group lock helpers */
+#define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
+static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
+{ \
+ if (nonatomic) \
+ mutex_ ## mutex_action(&group->mutex); \
+ else \
+ spin_ ## action(&group->lock); \
}
-static void __snd_pcm_stream_unlock_mode(struct snd_pcm_substream *substream,
- unsigned int mode, unsigned long flags)
-{
- if (substream->pcm->nonatomic) {
- mutex_unlock(&substream->self_group.mutex);
- up_read(&snd_pcm_link_rwsem);
- } else {
- spin_unlock(&substream->self_group.lock);
-
- switch (mode) {
- case PCM_LOCK_DEFAULT:
- read_unlock(&snd_pcm_link_rwlock);
- break;
- case PCM_LOCK_IRQ:
- read_unlock_irq(&snd_pcm_link_rwlock);
- break;
- case PCM_LOCK_IRQSAVE:
- read_unlock_irqrestore(&snd_pcm_link_rwlock, flags);
- break;
- }
- }
-}
+DEFINE_PCM_GROUP_LOCK(lock, lock);
+DEFINE_PCM_GROUP_LOCK(unlock, unlock);
+DEFINE_PCM_GROUP_LOCK(lock_irq, lock);
+DEFINE_PCM_GROUP_LOCK(unlock_irq, unlock);
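For reference, each DEFINE_PCM_GROUP_LOCK() line above expands to a tiny helper that takes the group mutex when the PCM is nonatomic and the matching spinlock variant otherwise; hand-expanding the _irq pair gives roughly:

static void snd_pcm_group_lock_irq(struct snd_pcm_group *group, bool nonatomic)
{
	if (nonatomic)
		mutex_lock(&group->mutex);	/* mutexes may sleep, so no irq masking */
	else
		spin_lock_irq(&group->lock);
}

static void snd_pcm_group_unlock_irq(struct snd_pcm_group *group, bool nonatomic)
{
	if (nonatomic)
		mutex_unlock(&group->mutex);
	else
		spin_unlock_irq(&group->lock);
}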
/**
* snd_pcm_stream_lock - Lock the PCM stream
@@ -161,19 +107,19 @@
*/
void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
{
- __snd_pcm_stream_lock_mode(substream, PCM_LOCK_DEFAULT);
+ snd_pcm_group_lock(&substream->self_group, substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
/**
- * snd_pcm_stream_lock - Unlock the PCM stream
+ * snd_pcm_stream_unlock - Unlock the PCM stream
* @substream: PCM substream
*
* This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
*/
void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
{
- __snd_pcm_stream_unlock_mode(substream, PCM_LOCK_DEFAULT, 0);
+ snd_pcm_group_unlock(&substream->self_group, substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
@@ -187,9 +133,20 @@
*/
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
{
- __snd_pcm_stream_lock_mode(substream, PCM_LOCK_IRQ);
+ snd_pcm_group_lock_irq(&substream->self_group,
+ substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
+
+static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_group *group = &substream->self_group;
+
+ if (substream->pcm->nonatomic)
+ mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING);
+ else
+ spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
+}
/**
* snd_pcm_stream_unlock_irq - Unlock the PCM stream
@@ -199,13 +156,19 @@
*/
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
{
- __snd_pcm_stream_unlock_mode(substream, PCM_LOCK_IRQ, 0);
+ snd_pcm_group_unlock_irq(&substream->self_group,
+ substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
{
- return __snd_pcm_stream_lock_mode(substream, PCM_LOCK_IRQSAVE);
+ unsigned long flags = 0;
+ if (substream->pcm->nonatomic)
+ mutex_lock(&substream->self_group.mutex);
+ else
+ spin_lock_irqsave(&substream->self_group.lock, flags);
+ return flags;
}
EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
@@ -219,9 +182,22 @@
void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
unsigned long flags)
{
- __snd_pcm_stream_unlock_mode(substream, PCM_LOCK_IRQSAVE, flags);
+ if (substream->pcm->nonatomic)
+ mutex_unlock(&substream->self_group.mutex);
+ else
+ spin_unlock_irqrestore(&substream->self_group.lock, flags);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
+
+/* Run PCM ioctl ops */
+static int snd_pcm_ops_ioctl(struct snd_pcm_substream *substream,
+ unsigned cmd, void *arg)
+{
+ if (substream->ops->ioctl)
+ return substream->ops->ioctl(substream, cmd, arg);
+ else
+ return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
{
@@ -262,17 +238,29 @@
return err;
}
+/* macro for simplified cast */
+#define PARAM_MASK_BIT(b) (1U << (__force int)(b))
+
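The (__force int) cast is what lets the same macro be applied both to plain loop indices and to the sparse-annotated __bitwise PCM types (such as snd_pcm_access_t and snd_pcm_subformat_t) without cast warnings; both kinds of use appear later in this patch, roughly:

	params->cmask |= PARAM_MASK_BIT(k);				/* k: plain loop index */
	mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_INTERLEAVED);	/* __bitwise constant */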
static bool hw_support_mmap(struct snd_pcm_substream *substream)
{
if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
return false;
- /* architecture supports dma_mmap_coherent()? */
-#if defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) || !defined(CONFIG_HAS_DMA)
- if (!substream->ops->mmap &&
- substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
- return false;
-#endif
- return true;
+
+ if (substream->ops->mmap || substream->ops->page)
+ return true;
+
+ switch (substream->dma_buffer.dev.type) {
+ case SNDRV_DMA_TYPE_UNKNOWN:
+ /* we can't know the device, so just assume that the driver does
+ * everything right
+ */
+ return true;
+ case SNDRV_DMA_TYPE_CONTINUOUS:
+ case SNDRV_DMA_TYPE_VMALLOC:
+ return true;
+ default:
+ return dma_can_mmap(substream->dma_buffer.dev.dev);
+ }
}
static int constrain_mask_params(struct snd_pcm_substream *substream,
@@ -291,7 +279,7 @@
return -EINVAL;
/* This parameter is not requested to change by a caller. */
- if (!(params->rmask & (1 << k)))
+ if (!(params->rmask & PARAM_MASK_BIT(k)))
continue;
if (trace_hw_mask_param_enabled())
@@ -305,7 +293,7 @@
/* Set corresponding flag so that the caller gets it. */
trace_hw_mask_param(substream, k, 0, &old_mask, m);
- params->cmask |= 1 << k;
+ params->cmask |= PARAM_MASK_BIT(k);
}
return 0;
@@ -327,7 +315,7 @@
return -EINVAL;
/* This parameter is not requested to change by a caller. */
- if (!(params->rmask & (1 << k)))
+ if (!(params->rmask & PARAM_MASK_BIT(k)))
continue;
if (trace_hw_interval_param_enabled())
@@ -341,7 +329,7 @@
/* Set corresponding flag so that the caller gets it. */
trace_hw_interval_param(substream, k, 0, &old_interval, i);
- params->cmask |= 1 << k;
+ params->cmask |= PARAM_MASK_BIT(k);
}
return 0;
@@ -383,7 +371,7 @@
* have 0 so that the parameters are never changed anymore.
*/
for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
- vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;
+ vstamps[k] = (params->rmask & PARAM_MASK_BIT(k)) ? 1 : 0;
/* Due to the above design, actual sequence number starts at 2. */
stamp = 2;
@@ -451,7 +439,7 @@
hw_param_interval(params, r->var));
}
- params->cmask |= (1 << r->var);
+ params->cmask |= PARAM_MASK_BIT(r->var);
vstamps[r->var] = stamp;
again = true;
}
@@ -493,8 +481,9 @@
m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
if (snd_mask_single(m) && snd_interval_single(i)) {
- err = substream->ops->ioctl(substream,
- SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
+ err = snd_pcm_ops_ioctl(substream,
+ SNDRV_PCM_IOCTL1_FIFO_SIZE,
+ params);
if (err < 0)
return err;
}
@@ -519,9 +508,9 @@
params->info = 0;
params->fifo_size = 0;
- if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
+ if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
params->msbits = 0;
- if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
+ if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_RATE)) {
params->rate_num = 0;
params->rate_den = 0;
}
@@ -584,7 +573,8 @@
return usecs;
}
-static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_set_state(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
snd_pcm_stream_lock_irq(substream);
if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
@@ -602,8 +592,19 @@
#endif
}
+void snd_pcm_sync_stop(struct snd_pcm_substream *substream, bool sync_irq)
+{
+ if (substream->runtime && substream->runtime->stop_operating) {
+ substream->runtime->stop_operating = false;
+ if (substream->ops && substream->ops->sync_stop)
+ substream->ops->sync_stop(substream);
+ else if (sync_irq && substream->pcm->card->sync_irq > 0)
+ synchronize_irq(substream->pcm->card->sync_irq);
+ }
+}
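As a purely illustrative sketch of the new hook (foo_chip and its period_work are hypothetical, not from this patch): a driver that defers period handling to a work item could provide .sync_stop so that the snd_pcm_sync_stop() call above waits for that deferred work before hw_params/hw_free/prepare touch the buffer again.

/* assumes <sound/pcm.h> and <linux/workqueue.h>; foo_chip is hypothetical */
static int foo_pcm_sync_stop(struct snd_pcm_substream *substream)
{
	struct foo_chip *chip = snd_pcm_substream_chip(substream);

	cancel_work_sync(&chip->period_work);	/* wait for deferred period work */
	return 0;
}

static const struct snd_pcm_ops foo_pcm_ops = {
	/* .open, .close, .trigger, .pointer, ... */
	.sync_stop	= foo_pcm_sync_stop,	/* invoked via snd_pcm_sync_stop() */
};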
+
/**
- * snd_pcm_hw_param_choose - choose a configuration defined by @params
+ * snd_pcm_hw_params_choose - choose a configuration defined by @params
* @pcm: PCM instance
* @params: the hw_params instance
*
@@ -666,6 +667,30 @@
return 0;
}
+/* acquire buffer_mutex; if it's in r/w operation, return -EBUSY, otherwise
+ * block the further r/w operations
+ */
+static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
+{
+ if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
+ return -EBUSY;
+ mutex_lock(&runtime->buffer_mutex);
+ return 0; /* keep buffer_mutex, unlocked by below */
+}
+
+/* release buffer_mutex and clear r/w access flag */
+static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
+{
+ mutex_unlock(&runtime->buffer_mutex);
+ atomic_inc(&runtime->buffer_accessing);
+}
+
+#if IS_ENABLED(CONFIG_SND_PCM_OSS)
+#define is_oss_stream(substream) ((substream)->oss.oss)
+#else
+#define is_oss_stream(substream) false
+#endif
+
static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -677,22 +702,27 @@
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
+ err = snd_pcm_buffer_access_lock(runtime);
+ if (err < 0)
+ return err;
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_OPEN:
case SNDRV_PCM_STATE_SETUP:
case SNDRV_PCM_STATE_PREPARED:
+ if (!is_oss_stream(substream) &&
+ atomic_read(&substream->mmap_count))
+ err = -EBADFD;
break;
default:
- snd_pcm_stream_unlock_irq(substream);
- return -EBADFD;
+ err = -EBADFD;
+ break;
}
snd_pcm_stream_unlock_irq(substream);
-#if IS_ENABLED(CONFIG_SND_PCM_OSS)
- if (!substream->oss.oss)
-#endif
- if (atomic_read(&substream->mmap_count))
- return -EBADFD;
+ if (err)
+ goto unlock;
+
+ snd_pcm_sync_stop(substream, true);
params->rmask = ~0U;
err = snd_pcm_hw_refine(substream, params);
@@ -706,6 +736,14 @@
err = fixup_unreferenced_params(substream, params);
if (err < 0)
goto _error;
+
+ if (substream->managed_buffer_alloc) {
+ err = snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(params));
+ if (err < 0)
+ goto _error;
+ runtime->buffer_changed = err > 0;
+ }
if (substream->ops->hw_params != NULL) {
err = substream->ops->hw_params(substream, params);
@@ -764,19 +802,26 @@
snd_pcm_timer_resolution_change(substream);
snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
- if (pm_qos_request_active(&substream->latency_pm_qos_req))
- pm_qos_remove_request(&substream->latency_pm_qos_req);
+ if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
+ cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
if ((usecs = period_to_usecs(runtime)) >= 0)
- pm_qos_add_request(&substream->latency_pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY, usecs);
- return 0;
+ cpu_latency_qos_add_request(&substream->latency_pm_qos_req,
+ usecs);
+ err = 0;
_error:
- /* hardware might be unusable from this time,
- so we force application to retry to set
- the correct hardware parameter settings */
- snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
- if (substream->ops->hw_free != NULL)
- substream->ops->hw_free(substream);
+ if (err) {
+ /* hardware might be unusable from this time,
+ * so we force application to retry to set
+ * the correct hardware parameter settings
+ */
+ snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
+ if (substream->ops->hw_free != NULL)
+ substream->ops->hw_free(substream);
+ if (substream->managed_buffer_alloc)
+ snd_pcm_lib_free_pages(substream);
+ }
+ unlock:
+ snd_pcm_buffer_access_unlock(runtime);
return err;
}
@@ -801,6 +846,18 @@
return err;
}
+static int do_hw_free(struct snd_pcm_substream *substream)
+{
+ int result = 0;
+
+ snd_pcm_sync_stop(substream, true);
+ if (substream->ops->hw_free)
+ result = substream->ops->hw_free(substream);
+ if (substream->managed_buffer_alloc)
+ snd_pcm_lib_free_pages(substream);
+ return result;
+}
+
static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
@@ -809,22 +866,28 @@
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
+ result = snd_pcm_buffer_access_lock(runtime);
+ if (result < 0)
+ return result;
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_SETUP:
case SNDRV_PCM_STATE_PREPARED:
+ if (atomic_read(&substream->mmap_count))
+ result = -EBADFD;
break;
default:
- snd_pcm_stream_unlock_irq(substream);
- return -EBADFD;
+ result = -EBADFD;
+ break;
}
snd_pcm_stream_unlock_irq(substream);
- if (atomic_read(&substream->mmap_count))
- return -EBADFD;
- if (substream->ops->hw_free)
- result = substream->ops->hw_free(substream);
+ if (result)
+ goto unlock;
+ result = do_hw_free(substream);
snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
- pm_qos_remove_request(&substream->latency_pm_qos_req);
+ cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
+ unlock:
+ snd_pcm_buffer_access_unlock(runtime);
return result;
}
@@ -908,8 +971,8 @@
return delay + substream->runtime->delay;
}
-int snd_pcm_status(struct snd_pcm_substream *substream,
- struct snd_pcm_status *status)
+int snd_pcm_status64(struct snd_pcm_substream *substream,
+ struct snd_pcm_status64 *status)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -935,14 +998,22 @@
status->suspended_state = runtime->status->suspended_state;
if (status->state == SNDRV_PCM_STATE_OPEN)
goto _end;
- status->trigger_tstamp = runtime->trigger_tstamp;
+ status->trigger_tstamp_sec = runtime->trigger_tstamp.tv_sec;
+ status->trigger_tstamp_nsec = runtime->trigger_tstamp.tv_nsec;
if (snd_pcm_running(substream)) {
snd_pcm_update_hw_ptr(substream);
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
- status->tstamp = runtime->status->tstamp;
- status->driver_tstamp = runtime->driver_tstamp;
- status->audio_tstamp =
- runtime->status->audio_tstamp;
+ status->tstamp_sec = runtime->status->tstamp.tv_sec;
+ status->tstamp_nsec =
+ runtime->status->tstamp.tv_nsec;
+ status->driver_tstamp_sec =
+ runtime->driver_tstamp.tv_sec;
+ status->driver_tstamp_nsec =
+ runtime->driver_tstamp.tv_nsec;
+ status->audio_tstamp_sec =
+ runtime->status->audio_tstamp.tv_sec;
+ status->audio_tstamp_nsec =
+ runtime->status->audio_tstamp.tv_nsec;
if (runtime->audio_tstamp_report.valid == 1)
/* backwards compatibility, no report provided in COMPAT mode */
snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
@@ -953,8 +1024,13 @@
}
} else {
/* get tstamp only in fallback mode and only if enabled */
- if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
- snd_pcm_gettime(runtime, &status->tstamp);
+ if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
+ struct timespec64 tstamp;
+
+ snd_pcm_gettime(runtime, &tstamp);
+ status->tstamp_sec = tstamp.tv_sec;
+ status->tstamp_nsec = tstamp.tv_nsec;
+ }
}
_tstamp_end:
status->appl_ptr = runtime->control->appl_ptr;
@@ -971,11 +1047,11 @@
return 0;
}
-static int snd_pcm_status_user(struct snd_pcm_substream *substream,
- struct snd_pcm_status __user * _status,
- bool ext)
+static int snd_pcm_status_user64(struct snd_pcm_substream *substream,
+ struct snd_pcm_status64 __user * _status,
+ bool ext)
{
- struct snd_pcm_status status;
+ struct snd_pcm_status64 status;
int res;
memset(&status, 0, sizeof(status));
@@ -987,11 +1063,60 @@
if (ext && get_user(status.audio_tstamp_data,
(u32 __user *)(&_status->audio_tstamp_data)))
return -EFAULT;
- res = snd_pcm_status(substream, &status);
+ res = snd_pcm_status64(substream, &status);
if (res < 0)
return res;
if (copy_to_user(_status, &status, sizeof(status)))
return -EFAULT;
+ return 0;
+}
+
+static int snd_pcm_status_user32(struct snd_pcm_substream *substream,
+ struct snd_pcm_status32 __user * _status,
+ bool ext)
+{
+ struct snd_pcm_status64 status64;
+ struct snd_pcm_status32 status32;
+ int res;
+
+ memset(&status64, 0, sizeof(status64));
+ memset(&status32, 0, sizeof(status32));
+ /*
+ * with extension, parameters are read/write,
+ * get audio_tstamp_data from user,
+ * ignore rest of status structure
+ */
+ if (ext && get_user(status64.audio_tstamp_data,
+ (u32 __user *)(&_status->audio_tstamp_data)))
+ return -EFAULT;
+ res = snd_pcm_status64(substream, &status64);
+ if (res < 0)
+ return res;
+
+ status32 = (struct snd_pcm_status32) {
+ .state = status64.state,
+ .trigger_tstamp_sec = status64.trigger_tstamp_sec,
+ .trigger_tstamp_nsec = status64.trigger_tstamp_nsec,
+ .tstamp_sec = status64.tstamp_sec,
+ .tstamp_nsec = status64.tstamp_nsec,
+ .appl_ptr = status64.appl_ptr,
+ .hw_ptr = status64.hw_ptr,
+ .delay = status64.delay,
+ .avail = status64.avail,
+ .avail_max = status64.avail_max,
+ .overrange = status64.overrange,
+ .suspended_state = status64.suspended_state,
+ .audio_tstamp_data = status64.audio_tstamp_data,
+ .audio_tstamp_sec = status64.audio_tstamp_sec,
+ .audio_tstamp_nsec = status64.audio_tstamp_nsec,
+		.driver_tstamp_sec = status64.driver_tstamp_sec,
+		.driver_tstamp_nsec = status64.driver_tstamp_nsec,
+ .audio_tstamp_accuracy = status64.audio_tstamp_accuracy,
+ };
+
+ if (copy_to_user(_status, &status32, sizeof(status32)))
+ return -EFAULT;
+
return 0;
}
@@ -1013,7 +1138,7 @@
return -EINVAL;
memset(info, 0, sizeof(*info));
info->channel = channel;
- return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
+ return snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
}
static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
@@ -1047,11 +1172,17 @@
runtime->trigger_master = NULL;
}
+#define ACTION_ARG_IGNORE (__force snd_pcm_state_t)0
+
struct action_ops {
- int (*pre_action)(struct snd_pcm_substream *substream, int state);
- int (*do_action)(struct snd_pcm_substream *substream, int state);
- void (*undo_action)(struct snd_pcm_substream *substream, int state);
- void (*post_action)(struct snd_pcm_substream *substream, int state);
+ int (*pre_action)(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state);
+ int (*do_action)(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state);
+ void (*undo_action)(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state);
+ void (*post_action)(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state);
};
/*
@@ -1061,15 +1192,18 @@
*/
static int snd_pcm_action_group(const struct action_ops *ops,
struct snd_pcm_substream *substream,
- int state, int do_lock)
+ snd_pcm_state_t state,
+ bool stream_lock)
{
struct snd_pcm_substream *s = NULL;
struct snd_pcm_substream *s1;
int res = 0, depth = 1;
snd_pcm_group_for_each_entry(s, substream) {
- if (do_lock && s != substream) {
- if (s->pcm->nonatomic)
+ if (s != substream) {
+ if (!stream_lock)
+ mutex_lock_nested(&s->runtime->buffer_mutex, depth);
+ else if (s->pcm->nonatomic)
mutex_lock_nested(&s->self_group.mutex, depth);
else
spin_lock_nested(&s->self_group.lock, depth);
@@ -1097,18 +1231,18 @@
ops->post_action(s, state);
}
_unlock:
- if (do_lock) {
- /* unlock streams */
- snd_pcm_group_for_each_entry(s1, substream) {
- if (s1 != substream) {
- if (s1->pcm->nonatomic)
- mutex_unlock(&s1->self_group.mutex);
- else
- spin_unlock(&s1->self_group.lock);
- }
- if (s1 == s) /* end */
- break;
+ /* unlock streams */
+ snd_pcm_group_for_each_entry(s1, substream) {
+ if (s1 != substream) {
+ if (!stream_lock)
+ mutex_unlock(&s1->runtime->buffer_mutex);
+ else if (s1->pcm->nonatomic)
+ mutex_unlock(&s1->self_group.mutex);
+ else
+ spin_unlock(&s1->self_group.lock);
}
+ if (s1 == s) /* end */
+ break;
}
return res;
}
@@ -1118,7 +1252,7 @@
*/
static int snd_pcm_action_single(const struct action_ops *ops,
struct snd_pcm_substream *substream,
- int state)
+ snd_pcm_state_t state)
{
int res;
@@ -1133,35 +1267,83 @@
return res;
}
+static void snd_pcm_group_assign(struct snd_pcm_substream *substream,
+ struct snd_pcm_group *new_group)
+{
+ substream->group = new_group;
+ list_move(&substream->link_list, &new_group->substreams);
+}
+
+/*
+ * Unref and unlock the group, but keep the stream lock;
+ * when the group becomes empty and no longer referred, destroy itself
+ */
+static void snd_pcm_group_unref(struct snd_pcm_group *group,
+ struct snd_pcm_substream *substream)
+{
+ bool do_free;
+
+ if (!group)
+ return;
+ do_free = refcount_dec_and_test(&group->refs);
+ snd_pcm_group_unlock(group, substream->pcm->nonatomic);
+ if (do_free)
+ kfree(group);
+}
+
+/*
+ * Lock the group inside a stream lock and reference it;
+ * return the locked group object, or NULL if not linked
+ */
+static struct snd_pcm_group *
+snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
+{
+ bool nonatomic = substream->pcm->nonatomic;
+ struct snd_pcm_group *group;
+ bool trylock;
+
+ for (;;) {
+ if (!snd_pcm_stream_linked(substream))
+ return NULL;
+ group = substream->group;
+ /* block freeing the group object */
+ refcount_inc(&group->refs);
+
+ trylock = nonatomic ? mutex_trylock(&group->mutex) :
+ spin_trylock(&group->lock);
+ if (trylock)
+ break; /* OK */
+
+ /* re-lock for avoiding ABBA deadlock */
+ snd_pcm_stream_unlock(substream);
+ snd_pcm_group_lock(group, nonatomic);
+ snd_pcm_stream_lock(substream);
+
+ /* check the group again; the above opens a small race window */
+ if (substream->group == group)
+ break; /* OK */
+ /* group changed, try again */
+ snd_pcm_group_unref(group, substream);
+ }
+ return group;
+}
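The intended calling pattern, mirrored by snd_pcm_action() below and by the drain loop later in this patch, looks like this minimal sketch (the caller holds the stream lock throughout):

	struct snd_pcm_group *group;
	struct snd_pcm_substream *s;

	group = snd_pcm_stream_group_ref(substream);	/* NULL if not linked */
	if (group) {
		snd_pcm_group_for_each_entry(s, substream) {
			/* operate on every linked substream s */
		}
	}
	snd_pcm_group_unref(group, substream);	/* NULL-safe: unlocks group and drops the ref */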
+
/*
* Note: call with stream lock
*/
static int snd_pcm_action(const struct action_ops *ops,
struct snd_pcm_substream *substream,
- int state)
+ snd_pcm_state_t state)
{
+ struct snd_pcm_group *group;
int res;
- if (!snd_pcm_stream_linked(substream))
- return snd_pcm_action_single(ops, substream, state);
-
- if (substream->pcm->nonatomic) {
- if (!mutex_trylock(&substream->group->mutex)) {
- mutex_unlock(&substream->self_group.mutex);
- mutex_lock(&substream->group->mutex);
- mutex_lock(&substream->self_group.mutex);
- }
- res = snd_pcm_action_group(ops, substream, state, 1);
- mutex_unlock(&substream->group->mutex);
- } else {
- if (!spin_trylock(&substream->group->lock)) {
- spin_unlock(&substream->self_group.lock);
- spin_lock(&substream->group->lock);
- spin_lock(&substream->self_group.lock);
- }
- res = snd_pcm_action_group(ops, substream, state, 1);
- spin_unlock(&substream->group->lock);
- }
+ group = snd_pcm_stream_group_ref(substream);
+ if (group)
+ res = snd_pcm_action_group(ops, substream, state, true);
+ else
+ res = snd_pcm_action_single(ops, substream, state);
+ snd_pcm_group_unref(group, substream);
return res;
}
@@ -1170,7 +1352,7 @@
*/
static int snd_pcm_action_lock_irq(const struct action_ops *ops,
struct snd_pcm_substream *substream,
- int state)
+ snd_pcm_state_t state)
{
int res;
@@ -1184,15 +1366,21 @@
*/
static int snd_pcm_action_nonatomic(const struct action_ops *ops,
struct snd_pcm_substream *substream,
- int state)
+ snd_pcm_state_t state)
{
int res;
+ /* Guarantee the group members won't change during non-atomic action */
down_read(&snd_pcm_link_rwsem);
+ res = snd_pcm_buffer_access_lock(substream->runtime);
+ if (res < 0)
+ goto unlock;
if (snd_pcm_stream_linked(substream))
- res = snd_pcm_action_group(ops, substream, state, 0);
+ res = snd_pcm_action_group(ops, substream, state, false);
else
res = snd_pcm_action_single(ops, substream, state);
+ snd_pcm_buffer_access_unlock(substream->runtime);
+ unlock:
up_read(&snd_pcm_link_rwsem);
return res;
}
@@ -1200,7 +1388,8 @@
/*
* start callbacks
*/
-static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_pre_start(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
@@ -1213,20 +1402,23 @@
return 0;
}
-static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_start(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
if (substream->runtime->trigger_master != substream)
return 0;
return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
}
-static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_undo_start(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
if (substream->runtime->trigger_master == substream)
substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
}
-static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_start(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_trigger_tstamp(substream);
@@ -1270,7 +1462,8 @@
/*
* stop callbacks
*/
-static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_pre_stop(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
@@ -1279,15 +1472,19 @@
return 0;
}
-static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_stop(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
if (substream->runtime->trigger_master == substream &&
- snd_pcm_running(substream))
+ snd_pcm_running(substream)) {
substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
+ substream->runtime->stop_operating = true;
+ }
return 0; /* unconditionally stop all substreams */
}
-static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_stop(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
if (runtime->status->state != state) {
@@ -1357,14 +1554,17 @@
EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
/*
- * pause callbacks
+ * pause callbacks: pass boolean (to start pause or resume) as state argument
*/
-static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
+#define pause_pushed(state) (__force bool)(state)
+
+static int snd_pcm_pre_pause(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
return -ENOSYS;
- if (push) {
+ if (pause_pushed(state)) {
if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
return -EBADFD;
} else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
@@ -1373,13 +1573,14 @@
return 0;
}
-static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
+static int snd_pcm_do_pause(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
if (substream->runtime->trigger_master != substream)
return 0;
/* some drivers might use hw_ptr to recover from the pause -
update the hw_ptr now */
- if (push)
+ if (pause_pushed(state))
snd_pcm_update_hw_ptr(substream);
/* The jiffies check in snd_pcm_update_hw_ptr*() is done by
* a delta between the current jiffies, this gives a large enough
@@ -1387,23 +1588,27 @@
*/
substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
return substream->ops->trigger(substream,
- push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
- SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
+ pause_pushed(state) ?
+ SNDRV_PCM_TRIGGER_PAUSE_PUSH :
+ SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
}
-static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
+static void snd_pcm_undo_pause(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
if (substream->runtime->trigger_master == substream)
substream->ops->trigger(substream,
- push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
+ pause_pushed(state) ?
+ SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
SNDRV_PCM_TRIGGER_PAUSE_PUSH);
}
-static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
+static void snd_pcm_post_pause(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_trigger_tstamp(substream);
- if (push) {
+ if (pause_pushed(state)) {
runtime->status->state = SNDRV_PCM_STATE_PAUSED;
snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
wake_up(&runtime->sleep);
@@ -1424,15 +1629,24 @@
/*
* Push/release the pause for all linked streams.
*/
-static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
+static int snd_pcm_pause(struct snd_pcm_substream *substream, bool push)
{
- return snd_pcm_action(&snd_pcm_action_pause, substream, push);
+ return snd_pcm_action(&snd_pcm_action_pause, substream,
+ (__force snd_pcm_state_t)push);
+}
+
+static int snd_pcm_pause_lock_irq(struct snd_pcm_substream *substream,
+ bool push)
+{
+ return snd_pcm_action_lock_irq(&snd_pcm_action_pause, substream,
+ (__force snd_pcm_state_t)push);
}
#ifdef CONFIG_PM
-/* suspend */
+/* suspend callback: state argument ignored */
-static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
switch (runtime->status->state) {
@@ -1448,7 +1662,8 @@
return 0;
}
-static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_suspend(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
if (runtime->trigger_master != substream)
@@ -1456,10 +1671,12 @@
if (! snd_pcm_running(substream))
return 0;
substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
+ runtime->stop_operating = true;
return 0; /* suspend unconditionally */
}
-static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_suspend(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_trigger_tstamp(substream);
@@ -1476,29 +1693,25 @@
.post_action = snd_pcm_post_suspend
};
-/**
+/*
* snd_pcm_suspend - trigger SUSPEND to all linked streams
* @substream: the PCM substream
*
* After this call, all streams are changed to SUSPENDED state.
*
- * Return: Zero if successful (or @substream is %NULL), or a negative error
- * code.
+ * Return: Zero if successful, or a negative error code.
*/
-int snd_pcm_suspend(struct snd_pcm_substream *substream)
+static int snd_pcm_suspend(struct snd_pcm_substream *substream)
{
int err;
unsigned long flags;
- if (! substream)
- return 0;
-
snd_pcm_stream_lock_irqsave(substream, flags);
- err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
+ err = snd_pcm_action(&snd_pcm_action_suspend, substream,
+ ACTION_ARG_IGNORE);
snd_pcm_stream_unlock_irqrestore(substream, flags);
return err;
}
-EXPORT_SYMBOL(snd_pcm_suspend);
/**
* snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
@@ -1535,13 +1748,20 @@
return err;
}
}
+
+ for (stream = 0; stream < 2; stream++)
+ for (substream = pcm->streams[stream].substream;
+ substream; substream = substream->next)
+ snd_pcm_sync_stop(substream, false);
+
return 0;
}
EXPORT_SYMBOL(snd_pcm_suspend_all);
-/* resume */
+/* resume callbacks: state argument ignored */
-static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_pre_resume(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
@@ -1550,7 +1770,8 @@
return 0;
}
-static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_resume(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
if (runtime->trigger_master != substream)
@@ -1563,14 +1784,16 @@
return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
}
-static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_undo_resume(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
if (substream->runtime->trigger_master == substream &&
snd_pcm_running(substream))
substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
}
-static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_resume(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_trigger_tstamp(substream);
@@ -1587,7 +1810,8 @@
static int snd_pcm_resume(struct snd_pcm_substream *substream)
{
- return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
+ return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream,
+ ACTION_ARG_IGNORE);
}
#else
@@ -1628,7 +1852,9 @@
/*
* reset ioctl
*/
-static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
+/* reset callbacks: state argument ignored */
+static int snd_pcm_pre_reset(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
switch (runtime->status->state) {
@@ -1642,27 +1868,33 @@
}
}
-static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_reset(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
+ int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
if (err < 0)
return err;
+ snd_pcm_stream_lock_irq(substream);
runtime->hw_ptr_base = 0;
runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
runtime->status->hw_ptr % runtime->period_size;
runtime->silence_start = runtime->status->hw_ptr;
runtime->silence_filled = 0;
+ snd_pcm_stream_unlock_irq(substream);
return 0;
}
-static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_reset(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
+ snd_pcm_stream_lock_irq(substream);
runtime->control->appl_ptr = runtime->status->hw_ptr;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
runtime->silence_size > 0)
snd_pcm_playback_silence(substream, ULONG_MAX);
+ snd_pcm_stream_unlock_irq(substream);
}
static const struct action_ops snd_pcm_action_reset = {
@@ -1673,17 +1905,20 @@
static int snd_pcm_reset(struct snd_pcm_substream *substream)
{
- return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
+ return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream,
+ ACTION_ARG_IGNORE);
}
/*
* prepare ioctl
*/
-/* we use the second argument for updating f_flags */
+/* pass f_flags as state argument */
static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
- int f_flags)
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
+ int f_flags = (__force int)state;
+
if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
return -EBADFD;
@@ -1693,16 +1928,19 @@
return 0;
}
-static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_prepare(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
int err;
+ snd_pcm_sync_stop(substream, true);
err = substream->ops->prepare(substream);
if (err < 0)
return err;
- return snd_pcm_do_reset(substream, 0);
+ return snd_pcm_do_reset(substream, state);
}
-static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_prepare(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
runtime->control->appl_ptr = runtime->status->hw_ptr;
@@ -1735,8 +1973,8 @@
snd_pcm_stream_lock_irq(substream);
switch (substream->runtime->status->state) {
case SNDRV_PCM_STATE_PAUSED:
- snd_pcm_pause(substream, 0);
- /* fallthru */
+ snd_pcm_pause(substream, false);
+ fallthrough;
case SNDRV_PCM_STATE_SUSPENDED:
snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
break;
@@ -1744,14 +1982,17 @@
snd_pcm_stream_unlock_irq(substream);
return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
- substream, f_flags);
+ substream,
+ (__force snd_pcm_state_t)f_flags);
}
/*
* drain ioctl
*/
-static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
+/* drain init callbacks: state argument ignored */
+static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
switch (runtime->status->state) {
@@ -1764,7 +2005,8 @@
return 0;
}
-static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
+static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -1790,7 +2032,9 @@
} else {
/* stop running stream */
if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
- int new_state = snd_pcm_capture_avail(runtime) > 0 ?
+ snd_pcm_state_t new_state;
+
+ new_state = snd_pcm_capture_avail(runtime) > 0 ?
SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
snd_pcm_do_stop(substream, new_state);
snd_pcm_post_stop(substream, new_state);
@@ -1806,7 +2050,8 @@
return 0;
}
-static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
+static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
{
}
@@ -1815,8 +2060,6 @@
.do_action = snd_pcm_do_drain_init,
.post_action = snd_pcm_post_drain_init
};
-
-static int snd_pcm_drop(struct snd_pcm_substream *substream);
/*
* Drain the stream(s).
@@ -1831,6 +2074,7 @@
struct snd_card *card;
struct snd_pcm_runtime *runtime;
struct snd_pcm_substream *s;
+ struct snd_pcm_group *group;
wait_queue_entry_t wait;
int result = 0;
int nonblock = 0;
@@ -1847,14 +2091,14 @@
} else if (substream->f_flags & O_NONBLOCK)
nonblock = 1;
- down_read(&snd_pcm_link_rwsem);
snd_pcm_stream_lock_irq(substream);
/* resume pause */
if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
- snd_pcm_pause(substream, 0);
+ snd_pcm_pause(substream, false);
/* pre-start/stop - all running streams are changed to DRAINING state */
- result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
+ result = snd_pcm_action(&snd_pcm_action_drain_init, substream,
+ ACTION_ARG_IGNORE);
if (result < 0)
goto unlock;
/* in non-blocking, we don't wait in ioctl but let caller poll */
@@ -1872,6 +2116,7 @@
}
/* find a substream to drain */
to_check = NULL;
+ group = snd_pcm_stream_group_ref(substream);
snd_pcm_group_for_each_entry(s, substream) {
if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
continue;
@@ -1881,12 +2126,13 @@
break;
}
}
+ snd_pcm_group_unref(group, substream);
if (!to_check)
break; /* all drained */
init_waitqueue_entry(&wait, current);
+ set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&to_check->sleep, &wait);
snd_pcm_stream_unlock_irq(substream);
- up_read(&snd_pcm_link_rwsem);
if (runtime->no_period_wakeup)
tout = MAX_SCHEDULE_TIMEOUT;
else {
@@ -1897,10 +2143,18 @@
}
tout = msecs_to_jiffies(tout * 1000);
}
- tout = schedule_timeout_interruptible(tout);
- down_read(&snd_pcm_link_rwsem);
+ tout = schedule_timeout(tout);
+
snd_pcm_stream_lock_irq(substream);
- remove_wait_queue(&to_check->sleep, &wait);
+ group = snd_pcm_stream_group_ref(substream);
+ snd_pcm_group_for_each_entry(s, substream) {
+ if (s->runtime == to_check) {
+ remove_wait_queue(&to_check->sleep, &wait);
+ break;
+ }
+ }
+ snd_pcm_group_unref(group, substream);
+
if (card->shutdown) {
result = -ENODEV;
break;
@@ -1920,7 +2174,6 @@
unlock:
snd_pcm_stream_unlock_irq(substream);
- up_read(&snd_pcm_link_rwsem);
return result;
}
@@ -1946,7 +2199,7 @@
snd_pcm_stream_lock_irq(substream);
/* resume pause */
if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
- snd_pcm_pause(substream, 0);
+ snd_pcm_pause(substream, false);
snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
/* runtime->control->appl_ptr = runtime->status->hw_ptr; */
@@ -1959,13 +2212,19 @@
static bool is_pcm_file(struct file *file)
{
struct inode *inode = file_inode(file);
+ struct snd_pcm *pcm;
unsigned int minor;
if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
return false;
minor = iminor(inode);
- return snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK) ||
- snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
+ pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
+ if (!pcm)
+ pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
+ if (!pcm)
+ return false;
+ snd_card_unref(pcm->card);
+ return true;
}
/*
@@ -1976,7 +2235,8 @@
int res = 0;
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream1;
- struct snd_pcm_group *group;
+ struct snd_pcm_group *group, *target_group;
+ bool nonatomic = substream->pcm->nonatomic;
struct fd f = fdget(fd);
if (!f.file)
@@ -1987,18 +2247,20 @@
}
pcm_file = f.file->private_data;
substream1 = pcm_file->substream;
+
if (substream == substream1) {
res = -EINVAL;
goto _badf;
}
- group = kmalloc(sizeof(*group), GFP_KERNEL);
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
if (!group) {
res = -ENOMEM;
goto _nolock;
}
- down_write_nonfifo(&snd_pcm_link_rwsem);
- write_lock_irq(&snd_pcm_link_rwlock);
+ snd_pcm_group_init(group);
+
+ down_write(&snd_pcm_link_rwsem);
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
substream->runtime->status->state != substream1->runtime->status->state ||
substream->pcm->nonatomic != substream1->pcm->nonatomic) {
@@ -2009,23 +2271,24 @@
res = -EALREADY;
goto _end;
}
+
+ snd_pcm_stream_lock_irq(substream);
if (!snd_pcm_stream_linked(substream)) {
- substream->group = group;
- group = NULL;
- spin_lock_init(&substream->group->lock);
- mutex_init(&substream->group->mutex);
- INIT_LIST_HEAD(&substream->group->substreams);
- list_add_tail(&substream->link_list, &substream->group->substreams);
- substream->group->count = 1;
+ snd_pcm_group_assign(substream, group);
+ group = NULL; /* assigned, don't free this one below */
}
- list_add_tail(&substream1->link_list, &substream->group->substreams);
- substream->group->count++;
- substream1->group = substream->group;
+ target_group = substream->group;
+ snd_pcm_stream_unlock_irq(substream);
+
+ snd_pcm_group_lock_irq(target_group, nonatomic);
+ snd_pcm_stream_lock_nested(substream1);
+ snd_pcm_group_assign(substream1, target_group);
+ refcount_inc(&target_group->refs);
+ snd_pcm_stream_unlock(substream1);
+ snd_pcm_group_unlock_irq(target_group, nonatomic);
_end:
- write_unlock_irq(&snd_pcm_link_rwlock);
up_write(&snd_pcm_link_rwsem);
_nolock:
- snd_card_unref(substream1->pcm->card);
kfree(group);
_badf:
fdput(f);
@@ -2034,34 +2297,44 @@
static void relink_to_local(struct snd_pcm_substream *substream)
{
- substream->group = &substream->self_group;
- INIT_LIST_HEAD(&substream->self_group.substreams);
- list_add_tail(&substream->link_list, &substream->self_group.substreams);
+ snd_pcm_stream_lock_nested(substream);
+ snd_pcm_group_assign(substream, &substream->self_group);
+ snd_pcm_stream_unlock(substream);
}
static int snd_pcm_unlink(struct snd_pcm_substream *substream)
{
- struct snd_pcm_substream *s;
+ struct snd_pcm_group *group;
+ bool nonatomic = substream->pcm->nonatomic;
+ bool do_free = false;
int res = 0;
- down_write_nonfifo(&snd_pcm_link_rwsem);
- write_lock_irq(&snd_pcm_link_rwlock);
+ down_write(&snd_pcm_link_rwsem);
+
if (!snd_pcm_stream_linked(substream)) {
res = -EALREADY;
goto _end;
}
- list_del(&substream->link_list);
- substream->group->count--;
- if (substream->group->count == 1) { /* detach the last stream, too */
- snd_pcm_group_for_each_entry(s, substream) {
- relink_to_local(s);
- break;
- }
- kfree(substream->group);
- }
+
+ group = substream->group;
+ snd_pcm_group_lock_irq(group, nonatomic);
+
relink_to_local(substream);
+ refcount_dec(&group->refs);
+
+ /* detach the last stream, too */
+ if (list_is_singular(&group->substreams)) {
+ relink_to_local(list_first_entry(&group->substreams,
+ struct snd_pcm_substream,
+ link_list));
+ do_free = refcount_dec_and_test(&group->refs);
+ }
+
+ snd_pcm_group_unlock_irq(group, nonatomic);
+ if (do_free)
+ kfree(group);
+
_end:
- write_unlock_irq(&snd_pcm_link_rwlock);
up_write(&snd_pcm_link_rwsem);
return res;
}
@@ -2110,21 +2383,21 @@
static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
- unsigned int k;
+ snd_pcm_format_t k;
const struct snd_interval *i =
hw_param_interval_c(params, rule->deps[0]);
struct snd_mask m;
struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
snd_mask_any(&m);
- for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
+ pcm_for_each_format(k) {
int bits;
- if (! snd_mask_test(mask, k))
+ if (!snd_mask_test_format(mask, k))
continue;
bits = snd_pcm_format_physical_width(k);
if (bits <= 0)
continue; /* ignore invalid formats */
if ((unsigned)bits < i->min || (unsigned)bits > i->max)
- snd_mask_reset(&m, k);
+ snd_mask_reset(&m, (__force unsigned)k);
}
return snd_mask_refine(mask, &m);
}
@@ -2133,14 +2406,15 @@
struct snd_pcm_hw_rule *rule)
{
struct snd_interval t;
- unsigned int k;
+ snd_pcm_format_t k;
+
t.min = UINT_MAX;
t.max = 0;
t.openmin = 0;
t.openmax = 0;
- for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
+ pcm_for_each_format(k) {
int bits;
- if (! snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
+ if (!snd_mask_test_format(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
continue;
bits = snd_pcm_format_physical_width(k);
if (bits <= 0)
@@ -2190,7 +2464,7 @@
return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
-int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
+static int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
@@ -2314,7 +2588,7 @@
return 0;
}
-int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
+static int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_pcm_hardware *hw = &runtime->hw;
@@ -2322,16 +2596,16 @@
unsigned int mask = 0;
if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
- mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
+ mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_INTERLEAVED);
if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
- mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
+ mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_NONINTERLEAVED);
if (hw_support_mmap(substream)) {
if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
- mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
+ mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_INTERLEAVED);
if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
- mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
+ mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED);
if (hw->info & SNDRV_PCM_INFO_COMPLEX)
- mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
+ mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_COMPLEX);
}
err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
if (err < 0)
@@ -2341,7 +2615,8 @@
if (err < 0)
return err;
- err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
+ err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT,
+ PARAM_MASK_BIT(SNDRV_PCM_SUBFORMAT_STD));
if (err < 0)
return err;
@@ -2411,14 +2686,13 @@
snd_pcm_drop(substream);
if (substream->hw_opened) {
- if (substream->ops->hw_free &&
- substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
- substream->ops->hw_free(substream);
+ if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
+ do_hw_free(substream);
substream->ops->close(substream);
substream->hw_opened = 0;
}
- if (pm_qos_request_active(&substream->latency_pm_qos_req))
- pm_qos_remove_request(&substream->latency_pm_qos_req);
+ if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
+ cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
if (substream->pcm_release) {
substream->pcm_release(substream);
substream->pcm_release = NULL;
@@ -2486,10 +2760,8 @@
return -ENOMEM;
}
pcm_file->substream = substream;
- if (substream->ref_count == 1) {
- substream->file = pcm_file;
+ if (substream->ref_count == 1)
substream->pcm_release = pcm_release_private;
- }
file->private_data = pcm_file;
return 0;
@@ -2610,7 +2882,7 @@
case SNDRV_PCM_STATE_DRAINING:
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
return -EBADFD;
- /* Fall through */
+ fallthrough;
case SNDRV_PCM_STATE_RUNNING:
return snd_pcm_update_hw_ptr(substream);
case SNDRV_PCM_STATE_PREPARED:
@@ -2738,7 +3010,6 @@
volatile struct snd_pcm_mmap_status *status;
volatile struct snd_pcm_mmap_control *control;
int err;
- snd_pcm_uframes_t hw_avail;
memset(&sync_ptr, 0, sizeof(sync_ptr));
if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
@@ -2767,16 +3038,6 @@
control->avail_min = sync_ptr.c.control.avail_min;
else
sync_ptr.c.control.avail_min = control->avail_min;
-
- if (runtime->render_flag & SNDRV_NON_DMA_MODE) {
- hw_avail = snd_pcm_playback_hw_avail(runtime);
- if ((hw_avail >= runtime->start_threshold)
- && (runtime->render_flag &
- SNDRV_RENDER_STOPPED)) {
- if (substream->ops->restart)
- substream->ops->restart(substream);
- }
- }
sync_ptr.s.status.state = status->state;
sync_ptr.s.status.hw_ptr = status->hw_ptr;
sync_ptr.s.status.tstamp = status->tstamp;
@@ -2787,6 +3048,112 @@
return -EFAULT;
return 0;
}
+
+struct snd_pcm_mmap_status32 {
+ snd_pcm_state_t state;
+ s32 pad1;
+ u32 hw_ptr;
+ s32 tstamp_sec;
+ s32 tstamp_nsec;
+ snd_pcm_state_t suspended_state;
+ s32 audio_tstamp_sec;
+ s32 audio_tstamp_nsec;
+} __attribute__((packed));
+
+struct snd_pcm_mmap_control32 {
+ u32 appl_ptr;
+ u32 avail_min;
+};
+
+struct snd_pcm_sync_ptr32 {
+ u32 flags;
+ union {
+ struct snd_pcm_mmap_status32 status;
+ unsigned char reserved[64];
+ } s;
+ union {
+ struct snd_pcm_mmap_control32 control;
+ unsigned char reserved[64];
+ } c;
+} __attribute__((packed));
+
+/* recalculate the boundary within 32 bits */
+static snd_pcm_uframes_t recalculate_boundary(struct snd_pcm_runtime *runtime)
+{
+ snd_pcm_uframes_t boundary;
+
+ if (! runtime->buffer_size)
+ return 0;
+ boundary = runtime->buffer_size;
+ while (boundary * 2 <= 0x7fffffffUL - runtime->buffer_size)
+ boundary *= 2;
+ return boundary;
+}
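A quick worked example of the doubling loop above, assuming buffer_size = 1024 frames:

	/*
	 * 1024 -> 2048 -> ... -> 0x40000000 (2^30); the next doubling, 2^31,
	 * would exceed 0x7fffffff - 1024, so the loop stops and the boundary
	 * becomes 0x40000000: the largest power-of-two multiple of the buffer
	 * size that still leaves room for one more buffer below 0x7fffffff.
	 */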
+
+static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
+ struct snd_pcm_sync_ptr32 __user *src)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ volatile struct snd_pcm_mmap_status *status;
+ volatile struct snd_pcm_mmap_control *control;
+ u32 sflags;
+ struct snd_pcm_mmap_control scontrol;
+ struct snd_pcm_mmap_status sstatus;
+ snd_pcm_uframes_t boundary;
+ int err;
+
+ if (snd_BUG_ON(!runtime))
+ return -EINVAL;
+
+ if (get_user(sflags, &src->flags) ||
+ get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
+ get_user(scontrol.avail_min, &src->c.control.avail_min))
+ return -EFAULT;
+ if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
+ err = snd_pcm_hwsync(substream);
+ if (err < 0)
+ return err;
+ }
+ status = runtime->status;
+ control = runtime->control;
+ boundary = recalculate_boundary(runtime);
+ if (! boundary)
+ boundary = 0x7fffffff;
+ snd_pcm_stream_lock_irq(substream);
+ /* FIXME: we should consider the boundary for the sync from app */
+ if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) {
+ err = pcm_lib_apply_appl_ptr(substream,
+ scontrol.appl_ptr);
+ if (err < 0) {
+ snd_pcm_stream_unlock_irq(substream);
+ return err;
+ }
+ } else
+ scontrol.appl_ptr = control->appl_ptr % boundary;
+ if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
+ control->avail_min = scontrol.avail_min;
+ else
+ scontrol.avail_min = control->avail_min;
+ sstatus.state = status->state;
+ sstatus.hw_ptr = status->hw_ptr % boundary;
+ sstatus.tstamp = status->tstamp;
+ sstatus.suspended_state = status->suspended_state;
+ sstatus.audio_tstamp = status->audio_tstamp;
+ snd_pcm_stream_unlock_irq(substream);
+ if (put_user(sstatus.state, &src->s.status.state) ||
+ put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
+ put_user(sstatus.tstamp.tv_sec, &src->s.status.tstamp_sec) ||
+ put_user(sstatus.tstamp.tv_nsec, &src->s.status.tstamp_nsec) ||
+ put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
+ put_user(sstatus.audio_tstamp.tv_sec, &src->s.status.audio_tstamp_sec) ||
+ put_user(sstatus.audio_tstamp.tv_nsec, &src->s.status.audio_tstamp_nsec) ||
+ put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
+ put_user(scontrol.avail_min, &src->c.control.avail_min))
+ return -EFAULT;
+
+ return 0;
+}
+#define __SNDRV_PCM_IOCTL_SYNC_PTR32 _IOWR('A', 0x23, struct snd_pcm_sync_ptr32)
static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
{
@@ -2818,7 +3185,8 @@
result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
else
result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
- __put_user(result, &_xferi->result);
+ if (put_user(result, &_xferi->result))
+ return -EFAULT;
return result < 0 ? result : 0;
}
@@ -2847,7 +3215,8 @@
else
result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
kfree(bufs);
- __put_user(result, &_xfern->result);
+ if (put_user(result, &_xfern->result))
+ return -EFAULT;
return result < 0 ? result : 0;
}
@@ -2862,7 +3231,8 @@
if (put_user(0, _frames))
return -EFAULT;
result = snd_pcm_rewind(substream, frames);
- __put_user(result, _frames);
+ if (put_user(result, _frames))
+ return -EFAULT;
return result < 0 ? result : 0;
}
@@ -2877,7 +3247,8 @@
if (put_user(0, _frames))
return -EFAULT;
result = snd_pcm_forward(substream, frames);
- __put_user(result, _frames);
+ if (put_user(result, _frames))
+ return -EFAULT;
return result < 0 ? result : 0;
}
@@ -2917,10 +3288,14 @@
return snd_pcm_hw_free(substream);
case SNDRV_PCM_IOCTL_SW_PARAMS:
return snd_pcm_sw_params_user(substream, arg);
- case SNDRV_PCM_IOCTL_STATUS:
- return snd_pcm_status_user(substream, arg, false);
- case SNDRV_PCM_IOCTL_STATUS_EXT:
- return snd_pcm_status_user(substream, arg, true);
+ case SNDRV_PCM_IOCTL_STATUS32:
+ return snd_pcm_status_user32(substream, arg, false);
+ case SNDRV_PCM_IOCTL_STATUS_EXT32:
+ return snd_pcm_status_user32(substream, arg, true);
+ case SNDRV_PCM_IOCTL_STATUS64:
+ return snd_pcm_status_user64(substream, arg, false);
+ case SNDRV_PCM_IOCTL_STATUS_EXT64:
+ return snd_pcm_status_user64(substream, arg, true);
case SNDRV_PCM_IOCTL_CHANNEL_INFO:
return snd_pcm_channel_info_user(substream, arg);
case SNDRV_PCM_IOCTL_PREPARE:
@@ -2952,7 +3327,9 @@
return -EFAULT;
return 0;
}
- case SNDRV_PCM_IOCTL_SYNC_PTR:
+ case __SNDRV_PCM_IOCTL_SYNC_PTR32:
+ return snd_pcm_ioctl_sync_ptr_compat(substream, arg);
+ case __SNDRV_PCM_IOCTL_SYNC_PTR64:
return snd_pcm_sync_ptr(substream, arg);
#ifdef CONFIG_SND_SUPPORT_OLD_API
case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
@@ -2965,9 +3342,7 @@
case SNDRV_PCM_IOCTL_DROP:
return snd_pcm_drop(substream);
case SNDRV_PCM_IOCTL_PAUSE:
- return snd_pcm_action_lock_irq(&snd_pcm_action_pause,
- substream,
- (int)(unsigned long)arg);
+ return snd_pcm_pause_lock_irq(substream, (unsigned long)arg);
case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
case SNDRV_PCM_IOCTL_READI_FRAMES:
return snd_pcm_xferi_frames_ioctl(substream, arg);
@@ -3290,8 +3665,6 @@
static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
{
- if (pcm_file->no_compat_mmap)
- return false;
/* See pcm_control_mmap_allowed() below.
* Since older alsa-lib requires both status and control mmaps to be
* coupled, we have to disable the status mmap for old alsa-lib, too.
@@ -3338,7 +3711,18 @@
snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
{
void *vaddr = substream->runtime->dma_area + ofs;
- return virt_to_page(vaddr);
+
+ switch (substream->dma_buffer.dev.type) {
+#ifdef CONFIG_SND_DMA_SGBUF
+ case SNDRV_DMA_TYPE_DEV_SG:
+ case SNDRV_DMA_TYPE_DEV_UC_SG:
+ return snd_pcm_sgbuf_ops_page(substream, ofs);
+#endif /* CONFIG_SND_DMA_SGBUF */
+ case SNDRV_DMA_TYPE_VMALLOC:
+ return vmalloc_to_page(vaddr);
+ default:
+ return virt_to_page(vaddr);
+ }
}
/*
@@ -3405,15 +3789,14 @@
area->vm_end - area->vm_start, area->vm_page_prot);
}
#endif /* CONFIG_GENERIC_ALLOCATOR */
-#ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
- substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
+ (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
+ substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
return dma_mmap_coherent(substream->dma_buffer.dev.dev,
area,
substream->runtime->dma_area,
substream->runtime->dma_addr,
substream->runtime->dma_bytes);
-#endif /* CONFIG_X86 */
/* mmap with fault handler */
area->vm_ops = &snd_pcm_vm_ops_data_fault;
return 0;
@@ -3504,11 +3887,19 @@
offset = area->vm_pgoff << PAGE_SHIFT;
switch (offset) {
- case SNDRV_PCM_MMAP_OFFSET_STATUS:
+ case SNDRV_PCM_MMAP_OFFSET_STATUS_OLD:
+ if (pcm_file->no_compat_mmap || !IS_ENABLED(CONFIG_64BIT))
+ return -ENXIO;
+ fallthrough;
+ case SNDRV_PCM_MMAP_OFFSET_STATUS_NEW:
if (!pcm_status_mmap_allowed(pcm_file))
return -ENXIO;
return snd_pcm_mmap_status(substream, file, area);
- case SNDRV_PCM_MMAP_OFFSET_CONTROL:
+ case SNDRV_PCM_MMAP_OFFSET_CONTROL_OLD:
+ if (pcm_file->no_compat_mmap || !IS_ENABLED(CONFIG_64BIT))
+ return -ENXIO;
+ fallthrough;
+ case SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW:
if (!pcm_control_mmap_allowed(pcm_file))
return -ENXIO;
return snd_pcm_mmap_control(substream, file, area);
@@ -3668,9 +4059,9 @@
unsigned long offset = pgoff << PAGE_SHIFT;
switch (offset) {
- case SNDRV_PCM_MMAP_OFFSET_STATUS:
+ case SNDRV_PCM_MMAP_OFFSET_STATUS_NEW:
return (unsigned long)runtime->status;
- case SNDRV_PCM_MMAP_OFFSET_CONTROL:
+ case SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW:
return (unsigned long)runtime->control;
default:
return (unsigned long)runtime->dma_area + offset;
--
Gitblit v1.6.2