x86, pmem: Fix cache flushing for iovec write < 8 bytes
author	Ben Hutchings <ben.hutchings@codethink.co.uk>
Tue, 9 May 2017 17:00:43 +0000 (18:00 +0100)
committer	Dan Williams <dan.j.williams@intel.com>
Tue, 9 May 2017 17:09:26 +0000 (10:09 -0700)
Commit 11e63f6d920d added cache flushing for unaligned writes from an
iovec, covering the first and last cache line of a >= 8 byte write and
the first cache line of a < 8 byte write.  But an unaligned write of
2-7 bytes can still cover two cache lines, so make sure we flush both
in that case.
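
To illustrate, here is a minimal userspace sketch (not kernel code; the
64-byte line size and the lines_touched() helper are assumptions for
demonstration only) counting how many cache lines a write touches:

    #include <stdio.h>
    #include <stdint.h>

    #define CACHE_LINE 64  /* assumed clflush granularity */

    /* Cache lines covered by a write of 'bytes' at address 'addr'. */
    static unsigned lines_touched(uintptr_t addr, size_t bytes)
    {
            uintptr_t first = addr & ~(uintptr_t)(CACHE_LINE - 1);
            uintptr_t last = (addr + bytes - 1) & ~(uintptr_t)(CACHE_LINE - 1);

            return (last - first) / CACHE_LINE + 1;
    }

    int main(void)
    {
            /* A 4-byte write at offset 62 straddles lines 0 and 1 ... */
            printf("%u\n", lines_touched(62, 4));  /* prints 2 */
            /* ... so flushing a length of 1 only ever covers line 0. */
            printf("%u\n", lines_touched(62, 1));  /* prints 1 */
            return 0;
    }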

Cc: <stable@vger.kernel.org>
Fixes: 11e63f6d920d ("x86, pmem: fix broken __copy_user_nocache ...")
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index d5a22bac9988..0ff8fe71b255 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -98,7 +98,7 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 
                if (bytes < 8) {
                        if (!IS_ALIGNED(dest, 4) || (bytes != 4))
-                               arch_wb_cache_pmem(addr, 1);
+                               arch_wb_cache_pmem(addr, bytes);
                } else {
                        if (!IS_ALIGNED(dest, 8)) {
                                dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
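
For context, arch_wb_cache_pmem() in this same header works roughly as
follows (a sketch of the helper in the kernel version being patched, not
necessarily verbatim): it rounds the start address down to a cache-line
boundary and issues clwb() for every line up to addr + size, so passing
the real write length reaches the second line of a straddling write,
where the old hard-coded length of 1 only ever flushed the first:

    static inline void arch_wb_cache_pmem(void *addr, size_t size)
    {
            u16 clflush_size = boot_cpu_data.x86_clflush_size;
            unsigned long clflush_mask = clflush_size - 1;
            void *vend = addr + size;
            void *p;

            /* Round down to the start of the first line, then write
             * back every line until the written range is covered. */
            for (p = (void *)((unsigned long)addr & ~clflush_mask);
                 p < vend; p += clflush_size)
                    clwb(p);
    }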