commit ede34f397ddb063b145b9e7d79c6026f819ded13 upstream.
The fix for the racy writes and ioctls to the sequencer widened the
application of client->ioctl_mutex to the whole write loop.  Although
it does unlock/relock the mutex around lengthy operations like the
event dup, the loop keeps holding ioctl_mutex for the whole time in
the other situations.  This may take quite a long time if user space
passes a huge buffer, and it is a likely cause of the weird behavior
spotted by the syzkaller fuzzer.
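
For illustration only (not part of this patch), a minimal user-space
sketch of the trigger scenario, a single huge write() to the sequencer
device; the device path, event count, and event contents here are
arbitrary:

  #include <fcntl.h>
  #include <stdlib.h>
  #include <unistd.h>
  #include <sound/asequencer.h>	/* struct snd_seq_event (kernel uapi header) */

  int main(void)
  {
  	size_t nev = 100000, i;
  	struct snd_seq_event *buf;
  	ssize_t ret;
  	int fd;

  	fd = open("/dev/snd/seq", O_RDWR);
  	if (fd < 0)
  		return 1;

  	/* one write() carrying this many events keeps snd_seq_write() in
  	 * its loop; before this patch the loop held client->ioctl_mutex
  	 * for the whole duration */
  	buf = calloc(nev, sizeof(*buf));
  	if (!buf) {
  		close(fd);
  		return 1;
  	}
  	for (i = 0; i < nev; i++)
  		buf[i].type = SNDRV_SEQ_EVENT_NOTEON;

  	ret = write(fd, buf, nev * sizeof(*buf));
  	(void)ret;	/* errors are irrelevant for this sketch */

  	free(buf);
  	close(fd);
  	return 0;
  }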
This patch adds a simple workaround: a mutex break in the loop once a
large number of events have been processed.  This shouldn't cause any
performance drop because the threshold is set high enough for usual
operations.
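
Outside the kernel context, the same "periodic mutex break" pattern can
be sketched with POSIX threads; the names process_one(), process_batch()
and BREAK_THRESHOLD below are illustrative and not taken from the patch:

  #include <pthread.h>
  #include <stddef.h>

  #define BREAK_THRESHOLD 200	/* mirrors the threshold chosen in the patch */

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  /* placeholder for the per-item work done while holding the lock */
  static void process_one(int item)
  {
  	(void)item;
  }

  static void process_batch(const int *items, size_t count)
  {
  	size_t handled;

  repeat:
  	handled = 0;
  	pthread_mutex_lock(&lock);
  	while (count > 0) {
  		process_one(*items);
  		items++;
  		count--;
  		/* drop and retake the lock once in a while so that other
  		 * threads (the ioctl path, in the patch) can get in */
  		if (++handled >= BREAK_THRESHOLD && count > 0) {
  			pthread_mutex_unlock(&lock);
  			goto repeat;
  		}
  	}
  	pthread_mutex_unlock(&lock);
  }

  int main(void)
  {
  	static int items[1000];

  	process_batch(items, 1000);
  	return 0;
  }

The break is skipped once the batch is finished, so the lock is released
exactly once per pass through the loop.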
Fixes: 7bd800915677 ("ALSA: seq: More protection for concurrent write and ioctl races")
Reported-by: syzbot+97aae04ce27e39cbfca9@syzkaller.appspotmail.com
Reported-by: syzbot+4c595632b98bb8ffcc66@syzkaller.appspotmail.com
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 {
 	struct snd_seq_client *client = file->private_data;
 	int written = 0, len;
-	int err;
+	int err, handled;
 	struct snd_seq_event event;
 
 	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
 		return -ENXIO;
 
 	if (!client->accept_output || client->pool == NULL)
 		return -ENXIO;
 
+ repeat:
+	handled = 0;
 	/* allocate the pool now if the pool is not allocated yet */
 	mutex_lock(&client->ioctl_mutex);
 	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
 
 		err = snd_seq_client_enqueue_event(client, &event, file,
 						   !(file->f_flags & O_NONBLOCK),
 						   0, 0, &client->ioctl_mutex);
 		if (err < 0)
 			break;
+		handled++;
 
 	__skip_event:
 		/* Update pointers and counts */
 		count -= len;
 		buf += len;
 		written += len;
+
+		/* let's have a coffee break if too many events are queued */
+		if (++handled >= 200) {
+			mutex_unlock(&client->ioctl_mutex);
+			goto repeat;
+		}
 	}
 
  out: