mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
ALSA: pcm: Fix races among concurrent hw_params and hw_free calls
Currently we have neither a proper check nor protection against concurrent calls of the PCM hw_params and hw_free ioctls, which may result in a use-after-free (UAF). Since the existing PCM stream lock can't be used to protect the whole ioctl operation, we need a new mutex to protect those racy calls.

This patch introduces a new mutex, runtime->buffer_mutex, and applies it to both the hw_params and hw_free ioctl code paths. Along with it, both functions are slightly modified (the mmap_count check is moved into the state-check block) for code simplicity.

Reported-by: Hu Jiahui <kirin.say@gmail.com>
Cc: <stable@vger.kernel.org>
Reviewed-by: Jaroslav Kysela <perex@perex.cz>
Link: https://lore.kernel.org/r/20220322170720.3529-2-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
parent 646b907e15
commit 92ee3c60ec
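For readability, here is a condensed sketch of the control flow that the pcm_native.c hunks below converge on after the patch. It is not a verbatim copy of the kernel source: the PCM_RUNTIME_CHECK, the actual parameter negotiation and the error paths are elided, and snd_pcm_hw_free() follows the same shape with `result` in place of `err`.

static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err = 0;

	mutex_lock(&runtime->buffer_mutex);	/* serializes hw_params vs. hw_free */
	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		/* the mmap_count check now lives inside the state-check block */
		if (!is_oss_stream(substream) &&
		    atomic_read(&substream->mmap_count))
			err = -EBADFD;
		break;
	default:
		err = -EBADFD;
		break;
	}
	snd_pcm_stream_unlock_irq(substream);
	if (err)
		goto unlock;

	/* ... buffer allocation and hw_params work proper, still under buffer_mutex ... */

unlock:
	mutex_unlock(&runtime->buffer_mutex);
	return err;
}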
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -401,6 +401,7 @@ struct snd_pcm_runtime {
 	wait_queue_head_t tsleep;	/* transfer sleep */
 	struct fasync_struct *fasync;
 	bool stop_operating;		/* sync_stop will be called */
+	struct mutex buffer_mutex;	/* protect for buffer changes */
 
 	/* -- private section -- */
 	void *private_data;
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -969,6 +969,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
 	init_waitqueue_head(&runtime->tsleep);
 
 	runtime->status->state = SNDRV_PCM_STATE_OPEN;
+	mutex_init(&runtime->buffer_mutex);
 
 	substream->runtime = runtime;
 	substream->private_data = pcm->private_data;
@@ -1002,6 +1003,7 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
 	} else {
 		substream->runtime = NULL;
 	}
+	mutex_destroy(&runtime->buffer_mutex);
 	kfree(runtime);
 	put_pid(substream->pid);
 	substream->pid = NULL;
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -685,33 +685,40 @@ static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_SND_PCM_OSS)
+#define is_oss_stream(substream)	((substream)->oss.oss)
+#else
+#define is_oss_stream(substream)	false
+#endif
+
 static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
 			     struct snd_pcm_hw_params *params)
 {
 	struct snd_pcm_runtime *runtime;
-	int err, usecs;
+	int err = 0, usecs;
 	unsigned int bits;
 	snd_pcm_uframes_t frames;
 
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
 	runtime = substream->runtime;
+	mutex_lock(&runtime->buffer_mutex);
 	snd_pcm_stream_lock_irq(substream);
 	switch (runtime->status->state) {
 	case SNDRV_PCM_STATE_OPEN:
 	case SNDRV_PCM_STATE_SETUP:
 	case SNDRV_PCM_STATE_PREPARED:
+		if (!is_oss_stream(substream) &&
+		    atomic_read(&substream->mmap_count))
+			err = -EBADFD;
 		break;
 	default:
-		snd_pcm_stream_unlock_irq(substream);
-		return -EBADFD;
+		err = -EBADFD;
+		break;
 	}
 	snd_pcm_stream_unlock_irq(substream);
-#if IS_ENABLED(CONFIG_SND_PCM_OSS)
-	if (!substream->oss.oss)
-#endif
-		if (atomic_read(&substream->mmap_count))
-			return -EBADFD;
+	if (err)
+		goto unlock;
 
 	snd_pcm_sync_stop(substream, true);
 
@@ -799,16 +806,21 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
 	if (usecs >= 0)
 		cpu_latency_qos_add_request(&substream->latency_pm_qos_req,
 					    usecs);
-	return 0;
+	err = 0;
  _error:
-	/* hardware might be unusable from this time,
-	   so we force application to retry to set
-	   the correct hardware parameter settings */
-	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
-	if (substream->ops->hw_free != NULL)
-		substream->ops->hw_free(substream);
-	if (substream->managed_buffer_alloc)
-		snd_pcm_lib_free_pages(substream);
+	if (err) {
+		/* hardware might be unusable from this time,
+		 * so we force application to retry to set
+		 * the correct hardware parameter settings
+		 */
+		snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
+		if (substream->ops->hw_free != NULL)
+			substream->ops->hw_free(substream);
+		if (substream->managed_buffer_alloc)
+			snd_pcm_lib_free_pages(substream);
+	}
+ unlock:
+	mutex_unlock(&runtime->buffer_mutex);
 	return err;
 }
 
@@ -848,26 +860,31 @@ static int do_hw_free(struct snd_pcm_substream *substream)
 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime;
-	int result;
+	int result = 0;
 
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
 	runtime = substream->runtime;
+	mutex_lock(&runtime->buffer_mutex);
 	snd_pcm_stream_lock_irq(substream);
 	switch (runtime->status->state) {
 	case SNDRV_PCM_STATE_SETUP:
 	case SNDRV_PCM_STATE_PREPARED:
+		if (atomic_read(&substream->mmap_count))
+			result = -EBADFD;
 		break;
 	default:
-		snd_pcm_stream_unlock_irq(substream);
-		return -EBADFD;
+		result = -EBADFD;
+		break;
 	}
 	snd_pcm_stream_unlock_irq(substream);
-	if (atomic_read(&substream->mmap_count))
-		return -EBADFD;
+	if (result)
+		goto unlock;
 	result = do_hw_free(substream);
 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
 	cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
+ unlock:
+	mutex_unlock(&runtime->buffer_mutex);
 	return result;
 }
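As a self-contained illustration of the race class being fixed, here is a minimal userspace analogue; it is not kernel code, and every identifier below (fake_runtime, fake_hw_params, fake_hw_free, ...) is invented for the example. An outer mutex serializes the two buffer-changing paths the way runtime->buffer_mutex now serializes hw_params and hw_free; without it, the two threads could free and reallocate the shared buffer concurrently, which is the UAF pattern the commit message describes.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in for snd_pcm_runtime: buffer_mutex plays the role of the new
 * runtime->buffer_mutex, dma_area the role of the managed buffer */
struct fake_runtime {
	pthread_mutex_t buffer_mutex;
	char *dma_area;
	size_t bytes;
};

/* hw_params-like path: replaces the buffer under the mutex */
static int fake_hw_params(struct fake_runtime *rt, size_t bytes)
{
	int err;

	pthread_mutex_lock(&rt->buffer_mutex);
	free(rt->dma_area);
	rt->dma_area = calloc(1, bytes);
	rt->bytes = rt->dma_area ? bytes : 0;
	err = rt->dma_area ? 0 : -1;
	pthread_mutex_unlock(&rt->buffer_mutex);
	return err;
}

/* hw_free-like path: releases the buffer under the same mutex */
static int fake_hw_free(struct fake_runtime *rt)
{
	pthread_mutex_lock(&rt->buffer_mutex);
	free(rt->dma_area);
	rt->dma_area = NULL;
	rt->bytes = 0;
	pthread_mutex_unlock(&rt->buffer_mutex);
	return 0;
}

static void *params_loop(void *arg)
{
	for (int i = 0; i < 10000; i++)
		fake_hw_params(arg, 4096);
	return NULL;
}

static void *free_loop(void *arg)
{
	for (int i = 0; i < 10000; i++)
		fake_hw_free(arg);
	return NULL;
}

int main(void)
{
	struct fake_runtime rt = { .buffer_mutex = PTHREAD_MUTEX_INITIALIZER };
	pthread_t a, b;

	/* two concurrent "ioctl" callers on the same stream */
	pthread_create(&a, NULL, params_loop, &rt);
	pthread_create(&b, NULL, free_loop, &rt);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	fake_hw_free(&rt);
	puts("done: buffer changes were serialized, no double free");
	return 0;
}

Build with "cc -pthread"; dropping the lock/unlock pairs in the two fake_* functions reproduces the kind of double free / use-after-free that the kernel patch closes.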