From e797af53b8814dfbc3c6ac134c528b8ab480f275 Mon Sep 17 00:00:00 2001
From: Jeeja KP
Date: Fri, 13 Nov 2015 19:22:10 +0530
Subject: [PATCH] ASoC: Intel: Skylake: Fix CLDMA buffer wrap case

When downloading the firmware/module, if the ring buffer boundary is
reached, we need to wrap to the zeroth position. On the next copy we
need to copy till the end of the buffer, and the remaining data needs
to be copied from the zeroth position.

In this case the copy was not handled correctly when the wrap condition
was reached, which caused invalid data to be copied and resulted in an
invalid hash failure.

This patch fixes the issue by handling the copy at the boundary
condition correctly.

Signed-off-by: Jeeja KP
Signed-off-by: Vinod Koul
Signed-off-by: Mark Brown
---
 sound/soc/intel/skylake/skl-sst-cldma.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/sound/soc/intel/skylake/skl-sst-cldma.c b/sound/soc/intel/skylake/skl-sst-cldma.c
index 4ddabe30b62a..b03d9db0acad 100644
--- a/sound/soc/intel/skylake/skl-sst-cldma.c
+++ b/sound/soc/intel/skylake/skl-sst-cldma.c
@@ -180,6 +180,21 @@ static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
 			ctx->cl_dev.dma_buffer_offset, trigger);
 	dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);
 
+	/*
+	 * Check if the size exceeds buffer boundary. If it exceeds
+	 * max_buffer size, then copy till buffer size and then copy
+	 * remaining buffer from the start of ring buffer.
+	 */
+	if (ctx->cl_dev.dma_buffer_offset + size > ctx->cl_dev.bufsize) {
+		unsigned int size_b = ctx->cl_dev.bufsize -
+					ctx->cl_dev.dma_buffer_offset;
+		memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
+			curr_pos, size_b);
+		size -= size_b;
+		curr_pos += size_b;
+		ctx->cl_dev.dma_buffer_offset = 0;
+	}
+
 	memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
 			curr_pos, size);
-- 
2.20.1
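
For illustration, the same split-copy idea can be sketched outside the driver
as a minimal stand-alone C program, assuming a tiny ring buffer. ring_write(),
RING_SIZE and the sample data below are illustrative only and are not part of
the kernel code; the sketch also omits the SPIB update and DMA trigger that
the driver performs around the copy.

#include <stdio.h>
#include <string.h>

#define RING_SIZE 8		/* illustrative size, stands in for cl_dev.bufsize */

static unsigned char ring[RING_SIZE];
static unsigned int ring_off;	/* stands in for cl_dev.dma_buffer_offset */

/* Copy 'size' bytes into the ring buffer, splitting the copy at the wrap point. */
static void ring_write(const unsigned char *src, unsigned int size)
{
	if (ring_off + size > RING_SIZE) {
		unsigned int head = RING_SIZE - ring_off;

		memcpy(ring + ring_off, src, head);	/* copy up to the boundary */
		src += head;
		size -= head;
		ring_off = 0;				/* wrap to the zeroth position */
	}

	memcpy(ring + ring_off, src, size);		/* copy the remainder */
	ring_off += size;
}

int main(void)
{
	const unsigned char data[] = "abcdefghij";
	unsigned int i;

	ring_write(data, 6);		/* fits without wrapping */
	ring_write(data + 6, 4);	/* crosses the boundary: 2 bytes, then 2 from offset 0 */

	for (i = 0; i < RING_SIZE; i++)
		putchar(ring[i]);
	putchar('\n');			/* prints "ijcdefgh" */
	return 0;
}

Without the wrap branch, the second write here would run 2 bytes past the end
of the buffer, which is the same class of overrun the patch guards against.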