Merge tag 'v3.10.68' into update
author Stricted <info@stricted.net>
Wed, 21 Mar 2018 21:38:24 +0000 (22:38 +0100)
committer Stricted <info@stricted.net>
Wed, 21 Mar 2018 21:38:24 +0000 (22:38 +0100)
This is the 3.10.68 stable release

44 files changed:
Makefile
arch/arm/include/asm/atomic.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/module.h
arch/arm/include/asm/page.h
arch/arm/include/asm/pgtable-3level-hwdef.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/pgtable.h
arch/arm/kernel/head.S
arch/arm/kernel/module.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7-3level.S
arch/powerpc/xmon/xmon.c
drivers/firmware/efi/efi-pstore.c
drivers/gpio/gpiolib-of.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/input/serio/i8042-x86ia64io.h
drivers/md/dm-cache-metadata.c
drivers/md/dm-thin.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/ethernet/ti/cpsw.c
drivers/regulator/core.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-pxa2xx.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/loopback/tcm_loop.c
drivers/target/loopback/tcm_loop.h
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/vhost/scsi.c
fs/nfs/direct.c
fs/nfs/nfs4client.c
fs/pstore/inode.c
fs/pstore/ram.c
fs/pstore/ram_core.c
kernel/workqueue.c
net/wireless/nl80211.c
sound/core/seq/seq_dummy.c
sound/soc/codecs/wm8960.c

index 979af692d6d3bbf2ea9a09d9132c9c1e1634eb7d..31d88b19b8825f1229b4b0a196ba287c2ed7c101 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 67
+SUBLEVEL = 68
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish
 
index da1c77d39327963ab10e633aeb8809aac7da2dec..9ee7e01066f99088ed0e1d9943c25479e3d0f8d3 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -114,7 +114,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
-       unsigned long oldval, res;
+       int oldval;
+       unsigned long res;
 
        smp_mb();
 
@@ -238,15 +239,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
-       u64 __aligned(8) counter;
+       long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i) { (i) }
 
 #ifdef CONFIG_ARM_LPAE
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-       u64 result;
+       long long result;
 
        __asm__ __volatile__("@ atomic64_read\n"
 "      ldrd    %0, %H0, [%1]"
@@ -257,7 +258,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
        return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
        __asm__ __volatile__("@ atomic64_set\n"
 "      strd    %2, %H2, [%1]"
@@ -266,9 +267,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
        );
 }
 #else
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-       u64 result;
+       long long result;
 
        __asm__ __volatile__("@ atomic64_read\n"
 "      ldrexd  %0, %H0, [%1]"
@@ -279,9 +280,9 @@ static inline u64 atomic64_read(const atomic64_t *v)
        return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
-       u64 tmp;
+       long long tmp;
 
        __asm__ __volatile__("@ atomic64_set\n"
 "1:    ldrexd  %0, %H0, [%2]\n"
@@ -294,9 +295,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 }
 #endif
 
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
-       u64 result;
+       long long result;
        unsigned long tmp;
 
        __asm__ __volatile__("@ atomic64_add\n"
@@ -311,9 +312,9 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
        : "cc");
 }
 
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-       u64 result;
+       long long result;
        unsigned long tmp;
 
        smp_mb();
@@ -334,9 +335,9 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
        return result;
 }
 
-static inline void atomic64_sub(u64 i, atomic64_t *v)
+static inline void atomic64_sub(long long i, atomic64_t *v)
 {
-       u64 result;
+       long long result;
        unsigned long tmp;
 
        __asm__ __volatile__("@ atomic64_sub\n"
@@ -351,9 +352,9 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
        : "cc");
 }
 
-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
-       u64 result;
+       long long result;
        unsigned long tmp;
 
        smp_mb();
@@ -374,9 +375,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
        return result;
 }
 
-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+                                       long long new)
 {
-       u64 oldval;
+       long long oldval;
        unsigned long res;
 
        smp_mb();
@@ -398,9 +400,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
        return oldval;
 }
 
-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
-       u64 result;
+       long long result;
        unsigned long tmp;
 
        smp_mb();
@@ -419,9 +421,9 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
        return result;
 }
 
-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
-       u64 result;
+       long long result;
        unsigned long tmp;
 
        smp_mb();
@@ -445,9 +447,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
        return result;
 }
 
-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
-       u64 val;
+       long long val;
        unsigned long tmp;
        int ret = 1;
 
index 57870ab313c52cd103b327363b0191e76efad9f6..d847cbbcee459f6ab90cdfbfd4d432e3dd81ec89 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
 #define TASK_UNMAPPED_BASE     UL(0x00000000)
 #endif
 
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET            UL(CONFIG_DRAM_BASE)
-#endif
-
 #ifndef END_MEM
 #define END_MEM                (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif
 
 #ifndef PAGE_OFFSET
-#define PAGE_OFFSET            (PHYS_OFFSET)
+#define PAGE_OFFSET            PLAT_PHYS_OFFSET
 #endif
 
 /*
  * The module can be at any place in ram in nommu mode.
  */
 #define MODULES_END            (END_MEM)
-#define MODULES_VADDR          (PHYS_OFFSET)
+#define MODULES_VADDR          PAGE_OFFSET
 
 #define XIP_VIRT_ADDR(physaddr)  (physaddr)
 
 #define page_to_phys(page)     (__pfn_to_phys(page_to_pfn(page)))
 #define phys_to_page(phys)     (pfn_to_page(__phys_to_pfn(phys)))
 
+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory.  This is used for XIP and NoMMU kernels, or by kernels which
+ * have their own mach/memory.h.  Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#ifndef PLAT_PHYS_OFFSET
+#define PLAT_PHYS_OFFSET       UL(CONFIG_PHYS_OFFSET)
+#endif
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -183,22 +189,15 @@ static inline unsigned long __phys_to_virt(unsigned long x)
        return t;
 }
 #else
+
+#define PHYS_OFFSET    PLAT_PHYS_OFFSET
+
 #define __virt_to_phys(x)      ((x) - PAGE_OFFSET + PHYS_OFFSET)
 #define __phys_to_virt(x)      ((x) - PHYS_OFFSET + PAGE_OFFSET)
-#endif
-#endif
-#endif /* __ASSEMBLY__ */
 
-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
-#define PHYS_OFFSET    PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET    UL(CONFIG_PHYS_OFFSET)
 #endif
 #endif
 
-#ifndef __ASSEMBLY__
-
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
@@ -207,7 +206,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
  * direct-mapped view.  We assume this is the first page
  * of RAM in the mem_map as well.
  */
-#define PHYS_PFN_OFFSET        (PHYS_OFFSET >> PAGE_SHIFT)
+#define PHYS_PFN_OFFSET        ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
 
 /*
  * These are *only* valid on the kernel direct mapped RAM memory.
@@ -275,7 +274,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #define ARCH_PFN_OFFSET                PHYS_PFN_OFFSET
 
 #define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
+                                       && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
 
 #endif
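
Two of the memory.h changes are subtle: with LPAE, PHYS_OFFSET can be a 64-bit phys_addr_t, so PHYS_PFN_OFFSET is narrowed to unsigned long explicitly (the PFN itself always fits), and virt_addr_valid() now also checks pfn_valid() so lowmem addresses that fall in a memory hole are rejected. A sketch of the PFN narrowing, assuming a hypothetical 8 GiB RAM base:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t phys_offset = 0x200000000ULL;  /* assumed 8 GiB base */
            unsigned long pfn = (unsigned long)(phys_offset >> 12);

            printf("PHYS_PFN_OFFSET = 0x%lx\n", pfn);       /* 0x200000 */
            return 0;
    }
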
 
index 0d3a28dbc8e5db05823e6b83f85ccc5d04b807ac..ed690c49ef93fae344aeaec993c939e336460283 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -12,6 +12,8 @@ enum {
        ARM_SEC_CORE,
        ARM_SEC_EXIT,
        ARM_SEC_DEVEXIT,
+       ARM_SEC_HOT,
+       ARM_SEC_UNLIKELY,
        ARM_SEC_MAX,
 };
 
index cbdc7a21f869fcce44573119edb99c26d710c1a9..4355f0ec44d62e9b5d7c40132f28f0ff6710f387 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -13,7 +13,7 @@
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT             12
 #define PAGE_SIZE              (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK              (~(PAGE_SIZE-1))
+#define PAGE_MASK              (~((1 << PAGE_SHIFT) - 1))
 
 #ifndef __ASSEMBLY__
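
Building PAGE_MASK from a plain int constant (instead of the unsigned long PAGE_SIZE) makes the mask sign-extend when it meets a 64-bit LPAE physical address, so the bits above bit 31 survive the AND; the same idea recurs below for PMD_MASK, PGDIR_MASK and SECTION_MASK. A standalone demonstration with a hypothetical address (uint32_t models the 32-bit unsigned long of the old form):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t phys = 0x2b0001234ULL;  /* LPAE address above 4 GiB */
            uint32_t old_mask = ~((UINT32_C(1) << 12) - 1); /* zero-extends */
            int      new_mask = ~((1 << 12) - 1);           /* sign-extends */

            /* high bits wiped: 0xb0001000 */
            printf("old: %#llx\n", (unsigned long long)(phys & old_mask));
            /* high bits kept: 0x2b0001000 */
            printf("new: %#llx\n", (unsigned long long)(phys & new_mask));
            return 0;
    }
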
 
index 18f5cef82ad58988e1c8d9b77ab95eb995330bbf..add785b1ec0ab918f72d3c7e3eb90b56ff5a5baf 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -68,6 +68,7 @@
 #define PTE_TYPE_PAGE          (_AT(pteval_t, 3) << 0)
 #define PTE_BUFFERABLE         (_AT(pteval_t, 1) << 2)         /* AttrIndx[0] */
 #define PTE_CACHEABLE          (_AT(pteval_t, 1) << 3)         /* AttrIndx[1] */
+#define PTE_AP2                        (_AT(pteval_t, 1) << 7)         /* AP[2] */
 #define PTE_EXT_SHARED         (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
 #define PTE_EXT_AF             (_AT(pteval_t, 1) << 10)        /* Access Flag */
 #define PTE_EXT_NG             (_AT(pteval_t, 1) << 11)        /* nG */
index 86b8fe398b9514d89a9032658f6bb3ad221b069e..bb017328c5bde298ba9019a9360109d48549cb1f 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -33,7 +33,7 @@
 #define PTRS_PER_PMD           512
 #define PTRS_PER_PGD           4
 
-#define PTE_HWTABLE_PTRS       (PTRS_PER_PTE)
+#define PTE_HWTABLE_PTRS       (0)
 #define PTE_HWTABLE_OFF                (0)
 #define PTE_HWTABLE_SIZE       (PTRS_PER_PTE * sizeof(u64))
 
 #define PMD_SHIFT              21
 
 #define PMD_SIZE               (1UL << PMD_SHIFT)
-#define PMD_MASK               (~(PMD_SIZE-1))
+#define PMD_MASK               (~((1 << PMD_SHIFT) - 1))
 #define PGDIR_SIZE             (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK             (~(PGDIR_SIZE-1))
+#define PGDIR_MASK             (~((1 << PGDIR_SHIFT) - 1))
 
 /*
  * section address mask and size definitions.
  */
 #define SECTION_SHIFT          21
 #define SECTION_SIZE           (1UL << SECTION_SHIFT)
-#define SECTION_MASK           (~(SECTION_SIZE-1))
+#define SECTION_MASK           (~((1 << SECTION_SHIFT) - 1))
 
 #define USER_PTRS_PER_PGD      (PAGE_OFFSET / PGDIR_SIZE)
 
 #define L_PTE_PRESENT          (_AT(pteval_t, 3) << 0)         /* Present */
 #define L_PTE_FILE             (_AT(pteval_t, 1) << 2)         /* only when !PRESENT */
 #define L_PTE_USER             (_AT(pteval_t, 1) << 6)         /* AP[1] */
-#define L_PTE_RDONLY           (_AT(pteval_t, 1) << 7)         /* AP[2] */
 #define L_PTE_SHARED           (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
 #define L_PTE_YOUNG            (_AT(pteval_t, 1) << 10)        /* AF */
 #define L_PTE_XN               (_AT(pteval_t, 1) << 54)        /* XN */
-#define L_PTE_DIRTY            (_AT(pteval_t, 1) << 55)        /* unused */
-#define L_PTE_SPECIAL          (_AT(pteval_t, 1) << 56)        /* unused */
+#define L_PTE_DIRTY            (_AT(pteval_t, 1) << 55)
+#define L_PTE_SPECIAL          (_AT(pteval_t, 1) << 56)
 #define L_PTE_NONE             (_AT(pteval_t, 1) << 57)        /* PROT_NONE */
+#define L_PTE_RDONLY           (_AT(pteval_t, 1) << 58)        /* READ ONLY */
 
 /*
  * To be used in assembly code with the upper page attributes.
@@ -166,6 +166,23 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
                clean_pmd_entry(pmdp);  \
        } while (0)
 
+/*
+ * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
+ * that are written to a page table but not for ptes created with mk_pte.
+ *
+ * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
+ * hugetlb_cow, where it is compared with an entry in a page table.
+ * This comparison test fails erroneously leading ultimately to a memory leak.
+ *
+ * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
+ * present before running the comparison.
+ */
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(pte_a,pte_b)  ((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG     \
+                                       : pte_val(pte_a))                               \
+                               == (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG   \
+                                       : pte_val(pte_b)))
+
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
 
 #endif /* __ASSEMBLY__ */
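
A plain-C restatement of the pte_same() rule the comment above describes (hypothetical bit values, for illustration): the nG bit is ignored for present entries, so an entry built by mk_pte() compares equal to the same entry after it has been written to a page table with nG set:

    #include <stdint.h>
    #include <stdio.h>

    #define PRESENT (3ULL << 0)     /* L_PTE_PRESENT */
    #define EXT_NG  (1ULL << 11)    /* PTE_EXT_NG */

    /* The rule from the patch: mask off nG, but only when present. */
    static uint64_t cmp_val(uint64_t pte)
    {
            return (pte & PRESENT) ? pte & ~EXT_NG : pte;
    }

    int main(void)
    {
            uint64_t from_mk_pte  = PRESENT | 0x1000;       /* no nG bit */
            uint64_t from_pgtable = from_mk_pte | EXT_NG;   /* nG set on write */

            printf("same=%d\n", cmp_val(from_mk_pte) ==
                                cmp_val(from_pgtable));     /* same=1 */
            return 0;
    }
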
index 5aac06fcc97e43e80ad8ac568a04f41f3198a5a6..4043d7f4bc92d277c14bae0449e62f4eea606359 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -211,12 +211,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 
 #define pte_clear(mm,addr,ptep)        set_pte_ext(ptep, __pte(0), 0)
 
+#define pte_isset(pte, val)    ((u32)(val) == (val) ? pte_val(pte) & (val) \
+                                               : !!(pte_val(pte) & (val)))
+#define pte_isclear(pte, val)  (!(pte_val(pte) & (val)))
+
 #define pte_none(pte)          (!pte_val(pte))
-#define pte_present(pte)       (pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte)         (!(pte_val(pte) & L_PTE_RDONLY))
-#define pte_dirty(pte)         (pte_val(pte) & L_PTE_DIRTY)
-#define pte_young(pte)         (pte_val(pte) & L_PTE_YOUNG)
-#define pte_exec(pte)          (!(pte_val(pte) & L_PTE_XN))
+#define pte_present(pte)       (pte_isset((pte), L_PTE_PRESENT))
+#define pte_write(pte)         (pte_isclear((pte), L_PTE_RDONLY))
+#define pte_dirty(pte)         (pte_isset((pte), L_PTE_DIRTY))
+#define pte_young(pte)         (pte_isset((pte), L_PTE_YOUNG))
+#define pte_exec(pte)          (pte_isclear((pte), L_PTE_XN))
 #define pte_special(pte)       (0)
 
 #define pte_present_user(pte)  (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
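
pte_isset() above guards against 64-bit pte values: bits such as L_PTE_XN live above bit 31 on LPAE, and a raw `pte_val(pte) & (val)` narrowed to 32 bits would silently test as false, so wide masks are collapsed with `!!` first while 32-bit masks keep the cheap form. A sketch of the failure mode:

    #include <stdint.h>
    #include <stdio.h>

    #define L_PTE_XN (UINT64_C(1) << 54)

    int main(void)
    {
            uint64_t pte = L_PTE_XN;

            uint32_t narrowed  = (uint32_t)(pte & L_PTE_XN); /* 0: bit 54 dropped */
            int      collapsed = !!(pte & L_PTE_XN);         /* 1: bool first */

            printf("narrowed=%u collapsed=%d\n", narrowed, collapsed);
            return 0;
    }
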
index b42251ac516d2f2f8810e1eff2d6a039cbe73067..3a63c46be0ae889d113571ceec02e1f89197b1f2 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -110,7 +110,7 @@ ENTRY(stext)
        sub     r4, r3, r4                      @ (PHYS_OFFSET - PAGE_OFFSET)
        add     r8, r8, r4                      @ PHYS_OFFSET
 #else
-       ldr     r8, =PHYS_OFFSET                @ always constant in this case
+       ldr     r8, =PLAT_PHYS_OFFSET           @ always constant in this case
 #endif
 
        /*
index b69ed0c44555c6cc83e46489b4466da92ee4d6c7..70bec79c2f3831d344e9eb63bb6d8dbab87230e7 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -296,6 +296,10 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
                        maps[ARM_SEC_EXIT].unw_sec = s;
                else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
                        maps[ARM_SEC_DEVEXIT].unw_sec = s;
+               else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0)
+                       maps[ARM_SEC_UNLIKELY].unw_sec = s;
+               else if (strcmp(".ARM.exidx.text.hot", secname) == 0)
+                       maps[ARM_SEC_HOT].unw_sec = s;
                else if (strcmp(".init.text", secname) == 0)
                        maps[ARM_SEC_INIT].txt_sec = s;
                else if (strcmp(".devinit.text", secname) == 0)
@@ -306,6 +310,10 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
                        maps[ARM_SEC_EXIT].txt_sec = s;
                else if (strcmp(".devexit.text", secname) == 0)
                        maps[ARM_SEC_DEVEXIT].txt_sec = s;
+               else if (strcmp(".text.unlikely", secname) == 0)
+                       maps[ARM_SEC_UNLIKELY].txt_sec = s;
+               else if (strcmp(".text.hot", secname) == 0)
+                       maps[ARM_SEC_HOT].txt_sec = s;
        }
 
        for (i = 0; i < ARM_SEC_MAX; i++)
index 4df2b7834ec8b1c1656af182210329eaa8f1e17c..9c5c1d2dced5c556a655b0316bffe41eda9edde4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -429,12 +429,21 @@ void __init dma_contiguous_remap(void)
                map.type = MT_MEMORY_DMA_READY;
 
                /*
-                * Clear previous low-memory mapping
+                * Clear previous low-memory mapping to ensure that the
+                * TLB does not see any conflicting entries, then flush
+                * the TLB of the old entries before creating new mappings.
+                *
+                * This ensures that any speculatively loaded TLB entries
+                * (even though they may be rare) can not cause any problems,
+                * and ensures that this code is architecturally compliant.
                 */
                for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
                     addr += PMD_SIZE)
                        pmd_clear(pmd_off_k(addr));
 
+               flush_tlb_kernel_range(__phys_to_virt(start),
+                                      __phys_to_virt(end));
+
                iotable_init(&map, 1);
        }
 }
index 54fc6e5c43703cb43354c0c4812dd1f0e790096c..f8e6abb67a7540489f556dc5a1cc4be18cd18b84 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -708,8 +708,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-       unsigned long end, unsigned long phys, const struct mem_type *type,
-       bool force_pages)
+                                 unsigned long end, phys_addr_t phys,
+                                 const struct mem_type *type, bool force_pages)
 {
        pud_t *pud = pud_offset(pgd, addr);
        unsigned long next;
index b45d73fbe547630db79cadd77ed49080326bc12d..29944d7e769a364ba98b6194ae4ce53cfec411f0 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -78,8 +78,13 @@ ENTRY(cpu_v7_set_pte_ext)
        tst     rh, #1 << (57 - 32)             @ L_PTE_NONE
        bicne   rl, #L_PTE_VALID
        bne     1f
-       tst     rh, #1 << (55 - 32)             @ L_PTE_DIRTY
-       orreq   rl, #L_PTE_RDONLY
+
+       eor     ip, rh, #1 << (55 - 32) @ toggle L_PTE_DIRTY in temp reg to
+                                       @ test for !L_PTE_DIRTY || L_PTE_RDONLY
+       tst     ip, #1 << (55 - 32) | 1 << (58 - 32)
+       orrne   rl, #PTE_AP2
+       biceq   rl, #PTE_AP2
+
 1:     strd    r2, r3, [r0]
        ALT_SMP(W(nop))
 #ifdef CONFIG_ARM_ERRATA_824069
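
The new assembly folds the software DIRTY and RDONLY bits into the hardware AP[2] (read-only) permission: a pte becomes hardware-writable only when it is both dirty and software-writable, which is what lets the MMU fault on the first write so the kernel can mark the page dirty. A C restatement of the decision the eor/tst pair computes (illustrative, not part of the patch):

    #include <stdio.h>

    /* AP[2] = hardware read-only; writable only when dirty && !rdonly. */
    static int pte_ap2(int dirty, int rdonly)
    {
            return !dirty || rdonly;
    }

    int main(void)
    {
            int d, r;

            for (d = 0; d <= 1; d++)
                    for (r = 0; r <= 1; r++)
                            printf("dirty=%d rdonly=%d -> AP2=%d\n",
                                   d, r, pte_ap2(d, r));
            return 0;
    }
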
index 94599a65cc669c44712de23a00561ff23b275b55..89e57280d2e259f5a7631bae05e1c9e131ea2fb8 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -288,6 +288,7 @@ static inline void disable_surveillance(void)
        args.token = rtas_token("set-indicator");
        if (args.token == RTAS_UNKNOWN_SERVICE)
                return;
+       args.token = cpu_to_be32(args.token);
        args.nargs = cpu_to_be32(3);
        args.nret = cpu_to_be32(1);
        args.rets = &args.args[3];
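
RTAS expects its argument block in big-endian, and the surrounding code already converts nargs and nret with cpu_to_be32(); the unconverted rtas_token() result was the one stray CPU-endian field, which broke little-endian ppc64 hosts. There cpu_to_be32() is a byte swap (a no-op on big-endian); a sketch with an assumed token value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t token = 0x12345678;    /* hypothetical rtas_token() result */

            printf("%#010x\n", __builtin_bswap32(token));   /* 0x78563412 */
            return 0;
    }
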
index 202d2c85ba2e79e2db866e78a03dffa240967f25..9b2622e0a07e8800eb955b11fc4174110d6a2405 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -38,6 +38,12 @@ struct pstore_read_data {
        char **buf;
 };
 
+static inline u64 generic_id(unsigned long timestamp,
+                            unsigned int part, int count)
+{
+       return (timestamp * 100 + part) * 1000 + count;
+}
+
 static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
 {
        efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
@@ -56,7 +62,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
 
        if (sscanf(name, "dump-type%u-%u-%d-%lu",
                   cb_data->type, &part, &cnt, &time) == 4) {
-               *cb_data->id = part;
+               *cb_data->id = generic_id(time, part, cnt);
                *cb_data->count = cnt;
                cb_data->timespec->tv_sec = time;
                cb_data->timespec->tv_nsec = 0;
@@ -67,7 +73,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
                 * which doesn't support holding
                 * multiple logs, remains.
                 */
-               *cb_data->id = part;
+               *cb_data->id = generic_id(time, part, 0);
                *cb_data->count = 0;
                cb_data->timespec->tv_sec = time;
                cb_data->timespec->tv_nsec = 0;
@@ -185,14 +191,16 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
        char name[DUMP_NAME_LEN];
        efi_char16_t efi_name[DUMP_NAME_LEN];
        int found, i;
+       unsigned int part;
 
-       sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count,
-               time.tv_sec);
+       do_div(id, 1000);
+       part = do_div(id, 100);
+       sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count, time.tv_sec);
 
        for (i = 0; i < DUMP_NAME_LEN; i++)
                efi_name[i] = name[i];
 
-       edata.id = id;
+       edata.id = part;
        edata.type = type;
        edata.count = count;
        edata.time = time;
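
The new generic_id() packs timestamp, part and count into the single u64 id that pstore hands back, and efi_pstore_erase() recovers the part number by dividing the id apart again with do_div(). A round-trip sketch using plain division in place of do_div(), with hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t generic_id(unsigned long ts, unsigned int part, int count)
    {
            return ((uint64_t)ts * 100 + part) * 1000 + count;
    }

    int main(void)
    {
            uint64_t id = generic_id(1425000000UL, 7, 3);

            unsigned int count = id % 1000; id /= 1000;  /* do_div(id, 1000) */
            unsigned int part  = id % 100;  id /= 100;   /* part = do_div(id, 100) */

            printf("ts=%llu part=%u count=%u\n",
                   (unsigned long long)id, part, count); /* 1425000000 7 3 */
            return 0;
    }
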
index af909a20dd708d4cc471a0da635435d881a080e8..74769724c94a4b6cdf3a83a8f4dcd3f364a4ff29 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -48,7 +48,7 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
                 * Return true to stop looking and return the translation
                 * error via out_gpio
                 */
-               gg_data->out_gpio = ERR_PTR(ret);
+               gg_data->out_gpio = ret;
                return true;
         }
 
index 5d4a4583d2df8b42b5f5fb9ab52c420441c28f31..8019e642d2f51d919619ef9818a534574c4c5744 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -40,8 +40,15 @@ static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(device_list);
 static struct workqueue_struct *isert_rx_wq;
 static struct workqueue_struct *isert_comp_wq;
+static struct workqueue_struct *isert_release_wq;
 static struct kmem_cache *isert_cmd_cache;
 
+static int
+isert_rdma_post_recvl(struct isert_conn *isert_conn);
+static int
+isert_rdma_accept(struct isert_conn *isert_conn);
+struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
 {
@@ -107,9 +114,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
        attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
        /*
         * FIXME: Use devattr.max_sge - 2 for max_send_sge as
-        * work-around for RDMA_READ..
+        * work-around for RDMA_READs with ConnectX-2.
+        *
+        * Also, still make sure to have at least two SGEs for
+        * outgoing control PDU responses.
         */
-       attr.cap.max_send_sge = devattr.max_sge - 2;
+       attr.cap.max_send_sge = max(2, devattr.max_sge - 2);
        isert_conn->max_sge = attr.cap.max_send_sge;
 
        attr.cap.max_recv_sge = 1;
@@ -124,12 +134,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
        ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
        if (ret) {
                pr_err("rdma_create_qp failed for cma_id %d\n", ret);
-               return ret;
+               goto err;
        }
        isert_conn->conn_qp = cma_id->qp;
        pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
 
        return 0;
+err:
+       mutex_lock(&device_list_mutex);
+       device->cq_active_qps[min_index]--;
+       mutex_unlock(&device_list_mutex);
+
+       return ret;
 }
 
 static void
@@ -212,6 +228,13 @@ isert_create_device_ib_res(struct isert_device *device)
        struct ib_device *ib_dev = device->ib_device;
        struct isert_cq_desc *cq_desc;
        int ret = 0, i, j;
+       int max_rx_cqe, max_tx_cqe;
+       struct ib_device_attr dev_attr;
+
+       memset(&dev_attr, 0, sizeof(struct ib_device_attr));
+       ret = isert_query_device(device->ib_device, &dev_attr);
+       if (ret)
+               return ret;
 
        device->cqs_used = min_t(int, num_online_cpus(),
                                 device->ib_device->num_comp_vectors);
@@ -234,6 +257,9 @@ isert_create_device_ib_res(struct isert_device *device)
                goto out_cq_desc;
        }
 
+       max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr.max_cqe);
+       max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr.max_cqe);
+
        for (i = 0; i < device->cqs_used; i++) {
                cq_desc[i].device = device;
                cq_desc[i].cq_index = i;
@@ -242,7 +268,7 @@ isert_create_device_ib_res(struct isert_device *device)
                                                isert_cq_rx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
-                                               ISER_MAX_RX_CQ_LEN, i);
+                                               max_rx_cqe, i);
                if (IS_ERR(device->dev_rx_cq[i])) {
                        ret = PTR_ERR(device->dev_rx_cq[i]);
                        device->dev_rx_cq[i] = NULL;
@@ -253,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device)
                                                isert_cq_tx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
-                                               ISER_MAX_TX_CQ_LEN, i);
+                                               max_tx_cqe, i);
                if (IS_ERR(device->dev_tx_cq[i])) {
                        ret = PTR_ERR(device->dev_tx_cq[i]);
                        device->dev_tx_cq[i] = NULL;
@@ -375,8 +401,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
 static int
 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
-       struct iscsi_np *np = cma_id->context;
-       struct isert_np *isert_np = np->np_context;
+       struct isert_np *isert_np = cma_id->context;
+       struct iscsi_np *np = isert_np->np;
        struct isert_conn *isert_conn;
        struct isert_device *device;
        struct ib_device *ib_dev = cma_id->device;
@@ -401,12 +427,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        isert_conn->state = ISER_CONN_INIT;
        INIT_LIST_HEAD(&isert_conn->conn_accept_node);
        init_completion(&isert_conn->conn_login_comp);
+       init_completion(&isert_conn->login_req_comp);
        init_completion(&isert_conn->conn_wait);
        init_completion(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
 
-       cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
        isert_conn->responder_resources = event->param.conn.responder_resources;
        isert_conn->initiator_depth = event->param.conn.initiator_depth;
@@ -466,6 +492,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        if (ret)
                goto out_conn_dev;
 
+       ret = isert_rdma_post_recvl(isert_conn);
+       if (ret)
+               goto out_conn_dev;
+
+       ret = isert_rdma_accept(isert_conn);
+       if (ret)
+               goto out_conn_dev;
+
        mutex_lock(&isert_np->np_accept_mutex);
        list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
        mutex_unlock(&isert_np->np_accept_mutex);
@@ -486,6 +520,7 @@ out_login_buf:
        kfree(isert_conn->login_buf);
 out:
        kfree(isert_conn);
+       rdma_reject(cma_id, NULL, 0);
        return ret;
 }
 
@@ -498,18 +533,20 @@ isert_connect_release(struct isert_conn *isert_conn)
 
        pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 
+       isert_free_rx_descriptors(isert_conn);
+       rdma_destroy_id(isert_conn->conn_cm_id);
+
        if (isert_conn->conn_qp) {
                cq_index = ((struct isert_cq_desc *)
                        isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
                pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
+               mutex_lock(&device_list_mutex);
                isert_conn->conn_device->cq_active_qps[cq_index]--;
+               mutex_unlock(&device_list_mutex);
 
-               rdma_destroy_qp(isert_conn->conn_cm_id);
+               ib_destroy_qp(isert_conn->conn_qp);
        }
 
-       isert_free_rx_descriptors(isert_conn);
-       rdma_destroy_id(isert_conn->conn_cm_id);
-
        if (isert_conn->login_buf) {
                ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                                    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
@@ -529,9 +566,19 @@ isert_connect_release(struct isert_conn *isert_conn)
 static void
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
-       struct isert_conn *isert_conn = cma_id->context;
+       struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+       pr_info("conn %p\n", isert_conn);
 
-       kref_get(&isert_conn->conn_kref);
+       if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
+               pr_warn("conn %p connect_release is running\n", isert_conn);
+               return;
+       }
+
+       mutex_lock(&isert_conn->conn_mutex);
+       if (isert_conn->state != ISER_CONN_FULL_FEATURE)
+               isert_conn->state = ISER_CONN_UP;
+       mutex_unlock(&isert_conn->conn_mutex);
 }
 
 static void
@@ -552,65 +599,108 @@ isert_put_conn(struct isert_conn *isert_conn)
        kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
 }
 
+/**
+ * isert_conn_terminate() - Initiate connection termination
+ * @isert_conn: isert connection struct
+ *
+ * Notes:
+ * In case the connection state is FULL_FEATURE, move state
+ * to TERMINATING and start teardown sequence (rdma_disconnect).
+ * In case the connection state is UP, complete flush as well.
+ *
+ * This routine must be called with conn_mutex held. Thus it is
+ * safe to call multiple times.
+ */
 static void
-isert_disconnect_work(struct work_struct *work)
+isert_conn_terminate(struct isert_conn *isert_conn)
 {
-       struct isert_conn *isert_conn = container_of(work,
-                               struct isert_conn, conn_logout_work);
+       int err;
 
-       pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
-       mutex_lock(&isert_conn->conn_mutex);
-       if (isert_conn->state == ISER_CONN_UP)
+       switch (isert_conn->state) {
+       case ISER_CONN_TERMINATING:
+               break;
+       case ISER_CONN_UP:
+               /*
+                * No flush completions will occur as we didn't
+                * get to ISER_CONN_FULL_FEATURE yet, complete
+                * to allow teardown progress.
+                */
+               complete(&isert_conn->conn_wait_comp_err);
+       case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+               pr_info("Terminating conn %p state %d\n",
+                          isert_conn, isert_conn->state);
                isert_conn->state = ISER_CONN_TERMINATING;
-
-       if (isert_conn->post_recv_buf_count == 0 &&
-           atomic_read(&isert_conn->post_send_buf_count) == 0) {
-               mutex_unlock(&isert_conn->conn_mutex);
-               goto wake_up;
-       }
-       if (!isert_conn->conn_cm_id) {
-               mutex_unlock(&isert_conn->conn_mutex);
-               isert_put_conn(isert_conn);
-               return;
+               err = rdma_disconnect(isert_conn->conn_cm_id);
+               if (err)
+                       pr_warn("Failed rdma_disconnect isert_conn %p\n",
+                                  isert_conn);
+               break;
+       default:
+               pr_warn("conn %p terminating in state %d\n",
+                          isert_conn, isert_conn->state);
        }
+}
 
-       if (isert_conn->disconnect) {
-               /* Send DREQ/DREP towards our initiator */
-               rdma_disconnect(isert_conn->conn_cm_id);
-       }
+static int
+isert_np_cma_handler(struct isert_np *isert_np,
+                    enum rdma_cm_event_type event)
+{
+       pr_debug("isert np %p, handling event %d\n", isert_np, event);
 
-       mutex_unlock(&isert_conn->conn_mutex);
+       switch (event) {
+       case RDMA_CM_EVENT_DEVICE_REMOVAL:
+               isert_np->np_cm_id = NULL;
+               break;
+       case RDMA_CM_EVENT_ADDR_CHANGE:
+               isert_np->np_cm_id = isert_setup_id(isert_np);
+               if (IS_ERR(isert_np->np_cm_id)) {
+                       pr_err("isert np %p setup id failed: %ld\n",
+                                isert_np, PTR_ERR(isert_np->np_cm_id));
+                       isert_np->np_cm_id = NULL;
+               }
+               break;
+       default:
+               pr_err("isert np %p Unexpected event %d\n",
+                         isert_np, event);
+       }
 
-wake_up:
-       complete(&isert_conn->conn_wait);
+       return -1;
 }
 
 static int
-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+isert_disconnected_handler(struct rdma_cm_id *cma_id,
+                          enum rdma_cm_event_type event)
 {
+       struct isert_np *isert_np = cma_id->context;
        struct isert_conn *isert_conn;
 
-       if (!cma_id->qp) {
-               struct isert_np *isert_np = cma_id->context;
+       if (isert_np->np_cm_id == cma_id)
+               return isert_np_cma_handler(cma_id->context, event);
 
-               isert_np->np_cm_id = NULL;
-               return -1;
-       }
+       isert_conn = cma_id->qp->qp_context;
 
-       isert_conn = (struct isert_conn *)cma_id->context;
+       mutex_lock(&isert_conn->conn_mutex);
+       isert_conn_terminate(isert_conn);
+       mutex_unlock(&isert_conn->conn_mutex);
 
-       isert_conn->disconnect = disconnect;
-       INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
-       schedule_work(&isert_conn->conn_logout_work);
+       pr_info("conn %p completing conn_wait\n", isert_conn);
+       complete(&isert_conn->conn_wait);
 
        return 0;
 }
 
+static void
+isert_connect_error(struct rdma_cm_id *cma_id)
+{
+       struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+       isert_put_conn(isert_conn);
+}
+
 static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
        int ret = 0;
-       bool disconnect = false;
 
        pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
                 event->event, event->status, cma_id->context, cma_id);
@@ -628,11 +718,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
        case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
        case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
-               disconnect = true;
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
-               ret = isert_disconnected_handler(cma_id, disconnect);
+               ret = isert_disconnected_handler(cma_id, event->event);
                break;
+       case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
+       case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
        case RDMA_CM_EVENT_CONNECT_ERROR:
+               isert_connect_error(cma_id);
+               break;
        default:
                pr_err("Unhandled RDMA CMA event: %d\n", event->event);
                break;
@@ -834,7 +927,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
                        if (ret)
                                return ret;
 
-                       isert_conn->state = ISER_CONN_UP;
+                       /* Now we are in FULL_FEATURE phase */
+                       mutex_lock(&isert_conn->conn_mutex);
+                       isert_conn->state = ISER_CONN_FULL_FEATURE;
+                       mutex_unlock(&isert_conn->conn_mutex);
                        goto post_send;
                }
 
@@ -851,18 +947,17 @@ post_send:
 }
 
 static void
-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
-                  struct isert_conn *isert_conn)
+isert_rx_login_req(struct isert_conn *isert_conn)
 {
+       struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
+       int rx_buflen = isert_conn->login_req_len;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_login *login = conn->conn_login;
        int size;
 
-       if (!login) {
-               pr_err("conn->conn_login is NULL\n");
-               dump_stack();
-               return;
-       }
+       pr_info("conn %p\n", isert_conn);
+
+       WARN_ON_ONCE(!login);
 
        if (login->first_request) {
                struct iscsi_login_req *login_req =
@@ -892,7 +987,8 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
                 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
        memcpy(login->req_buf, &rx_desc->data[0], size);
 
-       complete(&isert_conn->conn_login_comp);
+       if (login->first_request)
+               complete(&isert_conn->conn_login_comp);
 }
 
 static void
@@ -1169,11 +1265,20 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
                 hdr->opcode, hdr->itt, hdr->flags,
                 (int)(xfer_len - ISER_HEADERS_LEN));
 
-       if ((char *)desc == isert_conn->login_req_buf)
-               isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
-                                  isert_conn);
-       else
+       if ((char *)desc == isert_conn->login_req_buf) {
+               isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
+               if (isert_conn->conn) {
+                       struct iscsi_login *login = isert_conn->conn->conn_login;
+
+                       if (login && !login->first_request)
+                               isert_rx_login_req(isert_conn);
+               }
+               mutex_lock(&isert_conn->conn_mutex);
+               complete(&isert_conn->login_req_comp);
+               mutex_unlock(&isert_conn->conn_mutex);
+       } else {
                isert_rx_do_work(desc, isert_conn);
+       }
 
        ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
                                      DMA_FROM_DEVICE);
@@ -1483,7 +1588,7 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
                msleep(3000);
 
        mutex_lock(&isert_conn->conn_mutex);
-       isert_conn->state = ISER_CONN_DOWN;
+       isert_conn_terminate(isert_conn);
        mutex_unlock(&isert_conn->conn_mutex);
 
        iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
@@ -2044,13 +2149,51 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
        return ret;
 }
 
+struct rdma_cm_id *
+isert_setup_id(struct isert_np *isert_np)
+{
+       struct iscsi_np *np = isert_np->np;
+       struct rdma_cm_id *id;
+       struct sockaddr *sa;
+       int ret;
+
+       sa = (struct sockaddr *)&np->np_sockaddr;
+       pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
+
+       id = rdma_create_id(isert_cma_handler, isert_np,
+                           RDMA_PS_TCP, IB_QPT_RC);
+       if (IS_ERR(id)) {
+               pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
+               ret = PTR_ERR(id);
+               goto out;
+       }
+       pr_debug("id %p context %p\n", id, id->context);
+
+       ret = rdma_bind_addr(id, sa);
+       if (ret) {
+               pr_err("rdma_bind_addr() failed: %d\n", ret);
+               goto out_id;
+       }
+
+       ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
+       if (ret) {
+               pr_err("rdma_listen() failed: %d\n", ret);
+               goto out_id;
+       }
+
+       return id;
+out_id:
+       rdma_destroy_id(id);
+out:
+       return ERR_PTR(ret);
+}
+
 static int
 isert_setup_np(struct iscsi_np *np,
               struct __kernel_sockaddr_storage *ksockaddr)
 {
        struct isert_np *isert_np;
        struct rdma_cm_id *isert_lid;
-       struct sockaddr *sa;
        int ret;
 
        isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
@@ -2062,9 +2205,8 @@ isert_setup_np(struct iscsi_np *np,
        mutex_init(&isert_np->np_accept_mutex);
        INIT_LIST_HEAD(&isert_np->np_accept_list);
        init_completion(&isert_np->np_login_comp);
+       isert_np->np = np;
 
-       sa = (struct sockaddr *)ksockaddr;
-       pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
        /*
         * Setup the np->np_sockaddr from the passed sockaddr setup
         * in iscsi_target_configfs.c code..
@@ -2072,37 +2214,20 @@ isert_setup_np(struct iscsi_np *np,
        memcpy(&np->np_sockaddr, ksockaddr,
               sizeof(struct __kernel_sockaddr_storage));
 
-       isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
-                               IB_QPT_RC);
+       isert_lid = isert_setup_id(isert_np);
        if (IS_ERR(isert_lid)) {
-               pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
-                      PTR_ERR(isert_lid));
                ret = PTR_ERR(isert_lid);
                goto out;
        }
 
-       ret = rdma_bind_addr(isert_lid, sa);
-       if (ret) {
-               pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
-               goto out_lid;
-       }
-
-       ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
-       if (ret) {
-               pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
-               goto out_lid;
-       }
-
        isert_np->np_cm_id = isert_lid;
        np->np_context = isert_np;
-       pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
 
        return 0;
 
-out_lid:
-       rdma_destroy_id(isert_lid);
 out:
        kfree(isert_np);
+
        return ret;
 }
 
@@ -2138,13 +2263,27 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        int ret;
 
-       pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
+       pr_info("before login_req comp conn: %p\n", isert_conn);
+       ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
+       if (ret) {
+               pr_err("isert_conn %p interrupted before got login req\n",
+                       isert_conn);
+               return ret;
+       }
+       isert_conn->login_req_comp.done = 0;
+
+       if (!login->first_request)
+               return 0;
+
+       isert_rx_login_req(isert_conn);
+
+       pr_info("before conn_login_comp conn: %p\n", conn);
 
        ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
        if (ret)
                return ret;
 
-       pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
+       pr_info("processing login->req: %p\n", login->req);
        return 0;
 }
 
@@ -2222,17 +2361,10 @@ accept_wait:
        isert_conn->conn = conn;
        max_accept = 0;
 
-       ret = isert_rdma_post_recvl(isert_conn);
-       if (ret)
-               return ret;
-
-       ret = isert_rdma_accept(isert_conn);
-       if (ret)
-               return ret;
-
        isert_set_conn_info(np, conn, isert_conn);
 
-       pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
+       pr_debug("Processing isert_conn: %p\n", isert_conn);
+
        return 0;
 }
 
@@ -2248,6 +2380,24 @@ isert_free_np(struct iscsi_np *np)
        kfree(isert_np);
 }
 
+static void isert_release_work(struct work_struct *work)
+{
+       struct isert_conn *isert_conn = container_of(work,
+                                                    struct isert_conn,
+                                                    release_work);
+
+       pr_info("Starting release conn %p\n", isert_conn);
+
+       wait_for_completion(&isert_conn->conn_wait);
+
+       mutex_lock(&isert_conn->conn_mutex);
+       isert_conn->state = ISER_CONN_DOWN;
+       mutex_unlock(&isert_conn->conn_mutex);
+
+       pr_info("Destroying conn %p\n", isert_conn);
+       isert_put_conn(isert_conn);
+}
+
 static void isert_wait_conn(struct iscsi_conn *conn)
 {
        struct isert_conn *isert_conn = conn->context;
@@ -2255,10 +2405,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
        pr_debug("isert_wait_conn: Starting \n");
 
        mutex_lock(&isert_conn->conn_mutex);
-       if (isert_conn->conn_cm_id) {
-               pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
-               rdma_disconnect(isert_conn->conn_cm_id);
-       }
        /*
         * Only wait for conn_wait_comp_err if the isert_conn made it
         * into full feature phase..
@@ -2267,14 +2413,13 @@ static void isert_wait_conn(struct iscsi_conn *conn)
                mutex_unlock(&isert_conn->conn_mutex);
                return;
        }
-       if (isert_conn->state == ISER_CONN_UP)
-               isert_conn->state = ISER_CONN_TERMINATING;
+       isert_conn_terminate(isert_conn);
        mutex_unlock(&isert_conn->conn_mutex);
 
        wait_for_completion(&isert_conn->conn_wait_comp_err);
 
-       wait_for_completion(&isert_conn->conn_wait);
-       isert_put_conn(isert_conn);
+       INIT_WORK(&isert_conn->release_work, isert_release_work);
+       queue_work(isert_release_wq, &isert_conn->release_work);
 }
 
 static void isert_free_conn(struct iscsi_conn *conn)
@@ -2320,20 +2465,30 @@ static int __init isert_init(void)
                goto destroy_rx_wq;
        }
 
+       isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
+                                       WQ_UNBOUND_MAX_ACTIVE);
+       if (!isert_release_wq) {
+               pr_err("Unable to allocate isert_release_wq\n");
+               ret = -ENOMEM;
+               goto destroy_comp_wq;
+       }
+
        isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
                        sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
                        0, NULL);
        if (!isert_cmd_cache) {
                pr_err("Unable to create isert_cmd_cache\n");
                ret = -ENOMEM;
-               goto destroy_tx_cq;
+               goto destroy_release_wq;
        }
 
        iscsit_register_transport(&iser_target_transport);
-       pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
+       pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
        return 0;
 
-destroy_tx_cq:
+destroy_release_wq:
+       destroy_workqueue(isert_release_wq);
+destroy_comp_wq:
        destroy_workqueue(isert_comp_wq);
 destroy_rx_wq:
        destroy_workqueue(isert_rx_wq);
@@ -2344,6 +2499,7 @@ static void __exit isert_exit(void)
 {
        flush_scheduled_work();
        kmem_cache_destroy(isert_cmd_cache);
+       destroy_workqueue(isert_release_wq);
        destroy_workqueue(isert_comp_wq);
        destroy_workqueue(isert_rx_wq);
        iscsit_unregister_transport(&iser_target_transport);
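
Most of the isert changes rework connection setup and teardown: listener creation moves into isert_setup_id(), the post_recvl/accept calls move into isert_connect_request(), termination is centralized in isert_conn_terminate(), and final release runs from the new isert_release_wq. One detail worth calling out is the switch to kref_get_unless_zero() in isert_connected_handler(): a connected event racing with the final put must not resurrect a dying connection. A single-threaded sketch of that decision (the real kref uses an atomic add-unless; this is illustrative only):

    #include <stdio.h>

    struct conn { int refcount; };

    /* Stand-in for kref_get_unless_zero(): take a reference only if
     * one is still held, so teardown cannot be undone. */
    static int get_unless_zero(struct conn *c)
    {
            if (c->refcount == 0)
                    return 0;       /* release already ran: caller bails */
            c->refcount++;
            return 1;
    }

    int main(void)
    {
            struct conn dying = { .refcount = 0 };
            struct conn live  = { .refcount = 1 };

            printf("dying=%d live=%d\n",
                   get_unless_zero(&dying), get_unless_zero(&live)); /* 0 1 */
            return 0;
    }
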
index 032f65abee3694ec56122dd0bd9091eb5e84caa6..b233ee5e46b0ac67cb735449a7a9ccc87e9d7478 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -21,6 +21,7 @@ enum iser_ib_op_code {
 enum iser_conn_state {
        ISER_CONN_INIT,
        ISER_CONN_UP,
+       ISER_CONN_FULL_FEATURE,
        ISER_CONN_TERMINATING,
        ISER_CONN_DOWN,
 };
@@ -87,6 +88,7 @@ struct isert_conn {
        char                    *login_req_buf;
        char                    *login_rsp_buf;
        u64                     login_req_dma;
+       int                     login_req_len;
        u64                     login_rsp_dma;
        unsigned int            conn_rx_desc_head;
        struct iser_rx_desc     *conn_rx_descs;
@@ -94,18 +96,18 @@ struct isert_conn {
        struct iscsi_conn       *conn;
        struct list_head        conn_accept_node;
        struct completion       conn_login_comp;
+       struct completion       login_req_comp;
        struct iser_tx_desc     conn_login_tx_desc;
        struct rdma_cm_id       *conn_cm_id;
        struct ib_pd            *conn_pd;
        struct ib_mr            *conn_mr;
        struct ib_qp            *conn_qp;
        struct isert_device     *conn_device;
-       struct work_struct      conn_logout_work;
        struct mutex            conn_mutex;
        struct completion       conn_wait;
        struct completion       conn_wait_comp_err;
        struct kref             conn_kref;
-       bool                    disconnect;
+       struct work_struct      release_work;
 };
 
 #define ISERT_MAX_CQ 64
@@ -131,6 +133,7 @@ struct isert_device {
 };
 
 struct isert_np {
+       struct iscsi_np         *np;
        struct semaphore        np_sem;
        struct rdma_cm_id       *np_cm_id;
        struct mutex            np_accept_mutex;
index 78227f32d6fab67137d34a0fc6324d9839861e04..4de2571938b8b97870e91a04b7fac12a836dbaf9 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -151,6 +151,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
                },
        },
+       {
+               /* Medion Akoya E7225 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+               },
+       },
        {
                /* Blue FB5601 */
                .matches = {
index b361ce4ce511946adefa3be0b3c8374aac714e3d..4a10c1562d0fee8c9a433d99c3b68ae405049f73 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -648,7 +648,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd) {
                DMERR("could not allocate metadata struct");
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
        atomic_set(&cmd->ref_count, 1);
@@ -710,7 +710,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
                return cmd;
 
        cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
-       if (cmd) {
+       if (!IS_ERR(cmd)) {
                mutex_lock(&table_lock);
                cmd2 = lookup(bdev);
                if (cmd2) {
@@ -745,9 +745,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
 {
        struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
                                                       may_format_device, policy_hint_size);
-       if (cmd && !same_params(cmd, data_block_size)) {
+
+       if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
                dm_cache_metadata_close(cmd);
-               return NULL;
+               return ERR_PTR(-EINVAL);
        }
 
        return cmd;
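
This dm-cache fix changes the failure convention of metadata_open() and its callers from returning NULL to returning ERR_PTR(-errno), so -ENOMEM and -EINVAL stay distinguishable and an error can never be mistaken for a valid pointer. The idiom, sketched outside the kernel with stand-ins for the include/linux/err.h helpers:

    #include <stdio.h>

    #define MAX_ERRNO 4095      /* as in include/linux/err.h */

    static void *my_err_ptr(long err)     { return (void *)err; }
    static long my_ptr_err(const void *p) { return (long)p; }
    static int my_is_err(const void *p)
    {
            /* errors live in the top MAX_ERRNO addresses */
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *cmd = my_err_ptr(-12);    /* open failed with -ENOMEM */

            if (my_is_err(cmd))
                    printf("open failed: %ld\n", my_ptr_err(cmd));  /* -12 */
            return 0;
    }
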
index 86a2a5e3b26bacdc0c773076cf7283cacd188701..39996ca58ce60a0d228fc4cca271d79c5caa6e16 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2457,6 +2457,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
 
+       if (get_pool_mode(pool) >= PM_READ_ONLY) {
+               DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
+                     dm_device_name(pool->pool_md));
+               return -EINVAL;
+       }
+
        if (!strcasecmp(argv[0], "create_thin"))
                r = process_create_thin_mesg(argc, argv, pool);
 
index 63fb90b006ba35e64665c43804621a77c9f3265d..a3fb8b51038a17ae5d4ec3989c942afee4644ca9 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -579,7 +579,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
                          usb_sndbulkpipe(dev->udev,
                                          dev->bulk_out->bEndpointAddress),
                          buf, msg->len,
-                         kvaser_usb_simple_msg_callback, priv);
+                         kvaser_usb_simple_msg_callback, netdev);
        usb_anchor_urb(urb, &priv->tx_submitted);
 
        err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -654,11 +654,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
        priv = dev->nets[channel];
        stats = &priv->netdev->stats;
 
-       if (status & M16C_STATE_BUS_RESET) {
-               kvaser_usb_unlink_tx_urbs(priv);
-               return;
-       }
-
        skb = alloc_can_err_skb(priv->netdev, &cf);
        if (!skb) {
                stats->rx_dropped++;
@@ -669,7 +664,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
        netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
 
-       if (status & M16C_STATE_BUS_OFF) {
+       if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
                cf->can_id |= CAN_ERR_BUSOFF;
 
                priv->can.can_stats.bus_off++;
@@ -695,9 +690,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
                }
 
                new_state = CAN_STATE_ERROR_PASSIVE;
-       }
-
-       if (status == M16C_STATE_BUS_ERROR) {
+       } else if (status & M16C_STATE_BUS_ERROR) {
                if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
                    ((txerr >= 96) || (rxerr >= 96))) {
                        cf->can_id |= CAN_ERR_CRTL;
@@ -707,7 +700,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
                        priv->can.can_stats.error_warning++;
                        new_state = CAN_STATE_ERROR_WARNING;
-               } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+               } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
+                          ((txerr < 96) && (rxerr < 96))) {
                        cf->can_id |= CAN_ERR_PROT;
                        cf->data[2] = CAN_ERR_PROT_ACTIVE;
 
@@ -1583,7 +1577,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 {
        struct kvaser_usb *dev;
        int err = -ENOMEM;
-       int i;
+       int i, retry = 3;
 
        dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
@@ -1601,7 +1595,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 
        usb_set_intfdata(intf, dev);
 
-       err = kvaser_usb_get_software_info(dev);
+       /* On some x86 laptops, plugging a Kvaser device again after
+        * an unplug makes the firmware always ignore the very first
+        * command. For such a case, provide some room for retries
+        * instead of completely exiting the driver.
+        */
+       do {
+               err = kvaser_usb_get_software_info(dev);
+       } while (--retry && err == -ETIMEDOUT);
+
        if (err) {
                dev_err(&intf->dev,
                        "Cannot get software infos, error %d\n", err);
index b1ab3a4956a5b83e70097242afd5e3d7d7fe4173..e18240de159c40e66b601323b215a19b6cbcdbb1 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1293,6 +1293,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
        if (vid == priv->data.default_vlan)
                return 0;
 
+       if (priv->data.dual_emac) {
+               /* In dual EMAC, reserved VLAN id should not be used for
+                * creating VLAN interfaces as this can break the dual
+                * EMAC port separation
+                */
+               int i;
+
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (vid == priv->slaves[i].port_vlan)
+                               return -EINVAL;
+               }
+       }
+
        dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
        return cpsw_add_vlan_ale_entry(priv, vid);
 }
@@ -1306,6 +1319,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
        if (vid == priv->data.default_vlan)
                return 0;
 
+       if (priv->data.dual_emac) {
+               int i;
+
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (vid == priv->slaves[i].port_vlan)
+                               return -EINVAL;
+               }
+       }
+
        dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
        ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
        if (ret != 0)
index a86d1232613739ec70837326b2bd2aadbb5408cd..e873e8f0070dd3c8386d05fbcd1bd7b9138dcbbf 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1410,7 +1410,7 @@ struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
 }
 EXPORT_SYMBOL_GPL(regulator_get_exclusive);
 
-/* Locks held by regulator_put() */
+/* regulator_list_mutex lock held by regulator_put() */
 static void _regulator_put(struct regulator *regulator)
 {
        struct regulator_dev *rdev;
@@ -1425,12 +1425,14 @@ static void _regulator_put(struct regulator *regulator)
        /* remove any sysfs entries */
        if (regulator->dev)
                sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
+       mutex_lock(&rdev->mutex);
        kfree(regulator->supply_name);
        list_del(&regulator->list);
        kfree(regulator);
 
        rdev->open_count--;
        rdev->exclusive = 0;
+       mutex_unlock(&rdev->mutex);
 
        module_put(rdev->owner);
 }
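The regulator hunk above pulls the consumer teardown (unlink, kfree, open_count/exclusive updates) under rdev->mutex so a concurrent get cannot observe a half-released device. A minimal pthread sketch of that ordering; the struct and names are invented, not the regulator core's:

#include <pthread.h>
#include <stdio.h>

struct rdev {
        pthread_mutex_t lock;
        int open_count;
        int exclusive;
};

/* drop one consumer entirely under the device lock, as in _regulator_put() */
static void consumer_put(struct rdev *r)
{
        pthread_mutex_lock(&r->lock);
        /* list_del() and kfree() of the consumer would sit here */
        r->open_count--;
        r->exclusive = 0;
        pthread_mutex_unlock(&r->lock);
}

int main(void)
{
        struct rdev r = { PTHREAD_MUTEX_INITIALIZER, 1, 1 };

        consumer_put(&r);
        printf("open_count=%d exclusive=%d\n", r.open_count, r.exclusive);
        return 0;
}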
index 0791c92e8c505cc9a35158bcdcdf0a7612ae92de..1389fefe8814105a857c0dcb24d5a64485080ca5 100644 (file)
@@ -222,7 +222,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
        iounmap(clk_reg);
 
        dws->num_cs = 16;
-       dws->fifo_len = 40;     /* FIFO has 40 words buffer */
 
 #ifdef CONFIG_SPI_DW_MID_DMA
        dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
index d26a2d195d217ca5f5c5181e68ec732ebae4f767..cc42ee5e19fba4a54ff41d3a7472de494f22b0e9 100644 (file)
@@ -393,8 +393,8 @@ static void giveback(struct driver_data *drv_data)
                        cs_deassert(drv_data);
        }
 
-       spi_finalize_current_message(drv_data->master);
        drv_data->cur_chip = NULL;
+       spi_finalize_current_message(drv_data->master);
 }
 
 static void reset_sccr1(struct driver_data *drv_data)
index e14e105acff8f8bd1d7b5498621ab644a817b2b9..0493e8b1ba8fdd813ef28f23523b26dd3af2f6b2 100644 (file)
@@ -1360,6 +1360,9 @@ old_sess_out:
                conn->sock = NULL;
        }
 
+       if (conn->conn_transport->iscsit_wait_conn)
+               conn->conn_transport->iscsit_wait_conn(conn);
+
        if (conn->conn_transport->iscsit_free_conn)
                conn->conn_transport->iscsit_free_conn(conn);
 
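The login-error path above dispatches two optional transport hooks, each guarded by a NULL check. A compact sketch of that optional-callback pattern; the ops struct and names are invented:

#include <stdio.h>

struct transport_ops {
        void (*wait_conn)(void *conn);  /* optional */
        void (*free_conn)(void *conn);  /* optional */
};

static void teardown(struct transport_ops *ops, void *conn)
{
        /* call each hook only if the transport provides it */
        if (ops->wait_conn)
                ops->wait_conn(conn);
        if (ops->free_conn)
                ops->free_conn(conn);
}

static void say_free(void *conn) { (void)conn; puts("free_conn"); }

int main(void)
{
        struct transport_ops ops = { .free_conn = say_free };

        teardown(&ops, NULL);   /* wait_conn is absent and safely skipped */
        return 0;
}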
index 7c908141cc8a7980d29f151960369c063e06a122..9c02eb41ea90f830bf948b9072efb61f1004aa3c 100644 (file)
@@ -179,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
                goto out_done;
        }
 
-       tl_nexus = tl_hba->tl_nexus;
+       tl_nexus = tl_tpg->tl_nexus;
        if (!tl_nexus) {
                scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
                                " does not exist\n");
@@ -257,21 +257,21 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
         * Locate the tcm_loop_hba_t pointer
         */
        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+       /*
+        * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
+        */
+       tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+       se_tpg = &tl_tpg->tl_se_tpg;
        /*
         * Locate the tl_nexus and se_sess pointers
         */
-       tl_nexus = tl_hba->tl_nexus;
+       tl_nexus = tl_tpg->tl_nexus;
        if (!tl_nexus) {
                pr_err("Unable to perform device reset without"
                                " active I_T Nexus\n");
                return FAILED;
        }
        se_sess = tl_nexus->se_sess;
-       /*
-        * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
-        */
-       tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
-       se_tpg = &tl_tpg->tl_se_tpg;
 
        tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
        if (!tl_cmd) {
@@ -879,8 +879,8 @@ static int tcm_loop_make_nexus(
        struct tcm_loop_nexus *tl_nexus;
        int ret = -ENOMEM;
 
-       if (tl_tpg->tl_hba->tl_nexus) {
-               pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
+       if (tl_tpg->tl_nexus) {
+               pr_debug("tl_tpg->tl_nexus already exists\n");
                return -EEXIST;
        }
        se_tpg = &tl_tpg->tl_se_tpg;
@@ -915,7 +915,7 @@ static int tcm_loop_make_nexus(
         */
        __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
                        tl_nexus->se_sess, tl_nexus);
-       tl_tpg->tl_hba->tl_nexus = tl_nexus;
+       tl_tpg->tl_nexus = tl_nexus;
        pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
                " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
                name);
@@ -931,9 +931,8 @@ static int tcm_loop_drop_nexus(
 {
        struct se_session *se_sess;
        struct tcm_loop_nexus *tl_nexus;
-       struct tcm_loop_hba *tl_hba = tpg->tl_hba;
 
-       tl_nexus = tpg->tl_hba->tl_nexus;
+       tl_nexus = tpg->tl_nexus;
        if (!tl_nexus)
                return -ENODEV;
 
@@ -949,13 +948,13 @@ static int tcm_loop_drop_nexus(
        }
 
        pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
-               " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+               " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
                tl_nexus->se_sess->se_node_acl->initiatorname);
        /*
         * Release the SCSI I_T Nexus to the emulated SAS Target Port
         */
        transport_deregister_session(tl_nexus->se_sess);
-       tpg->tl_hba->tl_nexus = NULL;
+       tpg->tl_nexus = NULL;
        kfree(tl_nexus);
        return 0;
 }
@@ -971,7 +970,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
        struct tcm_loop_nexus *tl_nexus;
        ssize_t ret;
 
-       tl_nexus = tl_tpg->tl_hba->tl_nexus;
+       tl_nexus = tl_tpg->tl_nexus;
        if (!tl_nexus)
                return -ENODEV;
 
index dd7a84ee78e1129db36e7ab625731666b05125ef..4ed85886a1ee8fc5508c2cfa15bce58a8591aefc 100644 (file)
@@ -25,11 +25,6 @@ struct tcm_loop_tmr {
 };
 
 struct tcm_loop_nexus {
-       int it_nexus_active;
-       /*
-        * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
-        */
-       struct scsi_host *sh;
        /*
         * Pointer to TCM session for I_T Nexus
         */
@@ -45,6 +40,7 @@ struct tcm_loop_tpg {
        atomic_t tl_tpg_port_count;
        struct se_portal_group tl_se_tpg;
        struct tcm_loop_hba *tl_hba;
+       struct tcm_loop_nexus *tl_nexus;
 };
 
 struct tcm_loop_hba {
@@ -53,7 +49,6 @@ struct tcm_loop_hba {
        struct se_hba_s *se_hba;
        struct se_lun *tl_hba_lun;
        struct se_port *tl_hba_lun_sep;
-       struct tcm_loop_nexus *tl_nexus;
        struct device dev;
        struct Scsi_Host *sh;
        struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
index 2be407e22eb499902bd2320e976b0c178fe4bfd3..4deb0c997b1bfa70d2c4de64064f0313a8e847b2 100644 (file)
@@ -1037,10 +1037,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
                                " changed for TCM/pSCSI\n", dev);
                return -EINVAL;
        }
-       if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
+       if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
                pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
-                       " greater than fabric_max_sectors: %u\n", dev,
-                       optimal_sectors, dev->dev_attrib.fabric_max_sectors);
+                       " greater than hw_max_sectors: %u\n", dev,
+                       optimal_sectors, dev->dev_attrib.hw_max_sectors);
                return -EINVAL;
        }
 
@@ -1442,7 +1442,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
                                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
        dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
-       dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
 
        return dev;
 }
@@ -1475,6 +1474,7 @@ int target_configure_device(struct se_device *dev)
        dev->dev_attrib.hw_max_sectors =
                se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
                                         dev->dev_attrib.hw_block_size);
+       dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
 
        dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
        dev->creation_time = get_jiffies_64();
index 3b2879316b879cebfc8e5fbb44c6a8679555a711..8baaa0a26d70c7cffb24a6b46d325686e2f89c78 100644 (file)
@@ -554,7 +554,16 @@ fd_execute_rw(struct se_cmd *cmd)
        enum dma_data_direction data_direction = cmd->data_direction;
        struct se_device *dev = cmd->se_dev;
        int ret = 0;
-
+       /*
+        * We are currently limited by the number of iovecs (2048) per
+        * single vfs_[writev,readv] call.
+        */
+       if (cmd->data_length > FD_MAX_BYTES) {
+               pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+                      "FD_MAX_BYTES: %u iovec count limitiation\n",
+                       cmd->data_length, FD_MAX_BYTES);
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
        /*
         * Call vectorized fileio functions to map struct scatterlist
         * physical memory addresses to struct iovec virtual memory.
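On the size cap above: assuming FD_MAX_BYTES reflects the stated 2048-iovec ceiling with one page-sized iovec each (the macro itself lives in target_core_file.h), the arithmetic works out to an 8 MiB limit per vectored call:

#include <stdio.h>

#define MAX_IOVECS 2048         /* the vfs_[writev,readv] ceiling cited above */
#define PAGE_SZ    4096         /* typical page size, one iovec per page */

int main(void)
{
        unsigned long max_bytes = (unsigned long)MAX_IOVECS * PAGE_SZ;

        /* 2048 * 4096 = 8388608 bytes = 8 MiB */
        printf("max single vectored I/O: %lu bytes (%lu MiB)\n",
               max_bytes, max_bytes >> 20);
        return 0;
}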
index aa1620abec6dc0b1ccb5a78305ca2ed41800ac5d..b358b3d6c20169d660ecce7fb73c13ab44906f0c 100644 (file)
@@ -122,7 +122,7 @@ static int iblock_configure_device(struct se_device *dev)
        q = bdev_get_queue(bd);
 
        dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
-       dev->dev_attrib.hw_max_sectors = UINT_MAX;
+       dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
        /*
index 0ef75fb0ecbae3cafb85180bd86efbe0c0b75c35..92e6c510e5d0bfa3e7e85e49e9f9e0eabd9caf97 100644 (file)
@@ -561,21 +561,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
        if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
                unsigned long long end_lba;
 
-               if (sectors > dev->dev_attrib.fabric_max_sectors) {
-                       printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
-                               " big sectors %u exceeds fabric_max_sectors:"
-                               " %u\n", cdb[0], sectors,
-                               dev->dev_attrib.fabric_max_sectors);
-                       return TCM_INVALID_CDB_FIELD;
-               }
-               if (sectors > dev->dev_attrib.hw_max_sectors) {
-                       printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
-                               " big sectors %u exceeds backend hw_max_sectors:"
-                               " %u\n", cdb[0], sectors,
-                               dev->dev_attrib.hw_max_sectors);
-                       return TCM_INVALID_CDB_FIELD;
-               }
-
                end_lba = dev->transport->get_blocks(dev) + 1;
                if (cmd->t_task_lba + sectors > end_lba) {
                        pr_err("cmd exceeds last lba %llu "
index 34254b2ec4668fd0e285f5bb2d5770e4ee274b46..9998ae23cc7c429402a00bfe075fad68c0d8c274 100644 (file)
@@ -444,7 +444,6 @@ static sense_reason_t
 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
-       u32 max_sectors;
        int have_tp = 0;
 
        /*
@@ -469,9 +468,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
        /*
         * Set MAXIMUM TRANSFER LENGTH
         */
-       max_sectors = min(dev->dev_attrib.fabric_max_sectors,
-                         dev->dev_attrib.hw_max_sectors);
-       put_unaligned_be32(max_sectors, &buf[8]);
+       put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
 
        /*
         * Set OPTIMAL TRANSFER LENGTH
index 962c7e3c3baabf4de83ad50519a5ca8398d13bb8..fb97bc0b80e78a394b05e6ffe8b5d00fde6120b1 100644 (file)
@@ -820,6 +820,23 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
        return 0;
 }
 
+static int vhost_scsi_to_tcm_attr(int attr)
+{
+       switch (attr) {
+       case VIRTIO_SCSI_S_SIMPLE:
+               return MSG_SIMPLE_TAG;
+       case VIRTIO_SCSI_S_ORDERED:
+               return MSG_ORDERED_TAG;
+       case VIRTIO_SCSI_S_HEAD:
+               return MSG_HEAD_TAG;
+       case VIRTIO_SCSI_S_ACA:
+               return MSG_ACA_TAG;
+       default:
+               break;
+       }
+       return MSG_SIMPLE_TAG;
+}
+
 static void tcm_vhost_submission_work(struct work_struct *work)
 {
        struct tcm_vhost_cmd *tv_cmd =
@@ -846,9 +863,9 @@ static void tcm_vhost_submission_work(struct work_struct *work)
        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
                        tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
-                       tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
-                       0, sg_ptr, tv_cmd->tvc_sgl_count,
-                       sg_bidi_ptr, sg_no_bidi);
+                       vhost_scsi_to_tcm_attr(tv_cmd->tvc_task_attr),
+                       tv_cmd->tvc_data_direction, 0, sg_ptr,
+                       tv_cmd->tvc_sgl_count, sg_bidi_ptr, sg_no_bidi);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
@@ -1150,6 +1167,7 @@ static int vhost_scsi_set_endpoint(
        struct vhost_scsi *vs,
        struct vhost_scsi_target *t)
 {
+       struct se_portal_group *se_tpg;
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tv_tpg;
        struct tcm_vhost_tpg **vs_tpg;
@@ -1197,6 +1215,21 @@ static int vhost_scsi_set_endpoint(
                                ret = -EEXIST;
                                goto out;
                        }
+                       /*
+                        * In order to ensure individual vhost-scsi configfs
+                        * groups cannot be removed while in use by vhost ioctl,
+                        * go ahead and take an explicit se_tpg->tpg_group.cg_item
+                        * dependency now.
+                        */
+                       se_tpg = &tv_tpg->se_tpg;
+                       ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
+                                                  &se_tpg->tpg_group.cg_item);
+                       if (ret) {
+                               pr_warn("configfs_depend_item() failed: %d\n", ret);
+                               kfree(vs_tpg);
+                               mutex_unlock(&tv_tpg->tv_tpg_mutex);
+                               goto out;
+                       }
                        tv_tpg->tv_tpg_vhost_count++;
                        tv_tpg->vhost_scsi = vs;
                        vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
@@ -1240,6 +1273,7 @@ static int vhost_scsi_clear_endpoint(
        struct vhost_scsi *vs,
        struct vhost_scsi_target *t)
 {
+       struct se_portal_group *se_tpg;
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tv_tpg;
        struct vhost_virtqueue *vq;
@@ -1288,6 +1322,13 @@ static int vhost_scsi_clear_endpoint(
                vs->vs_tpg[target] = NULL;
                match = true;
                mutex_unlock(&tv_tpg->tv_tpg_mutex);
+               /*
+                * Release se_tpg->tpg_group.cg_item configfs dependency now
+                * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
+                */
+               se_tpg = &tv_tpg->se_tpg;
+               configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
+                                      &se_tpg->tpg_group.cg_item);
        }
        if (match) {
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
index 725e87538c98ad71ae5f28ded813d9e61fb8a6d1..615c5079db7c902b286f40823601a8578022fc37 100644 (file)
@@ -123,6 +123,12 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
  */
 ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
 {
+       struct inode *inode = iocb->ki_filp->f_mapping->host;
+
+       /* we only support swap files calling nfs_direct_IO */
+       if (!IS_SWAPFILE(inode))
+               return 0;
+
 #ifndef CONFIG_NFS_SWAP
        dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
                        iocb->ki_filp->f_path.dentry->d_name.name,
index 519833d0457ec75b9dc5519424b59ba3e073a20d..5f8d5ffdad8f8f2f39903682671363722d6ccc16 100644 (file)
@@ -462,7 +462,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
                        prev = pos;
 
                        status = nfs_wait_client_init_complete(pos);
-                       if (status == 0) {
+                       if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
                                nfs4_schedule_lease_recovery(pos);
                                status = nfs4_wait_clnt_recover(pos);
                        }
index 3ba30825f387d847c04054f85608f735458e811b..66c8c2fe86b702f1b967f9c5520c9526f5ad0a29 100644 (file)
@@ -178,6 +178,8 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
        if (p->psi->erase)
                p->psi->erase(p->type, p->id, p->count,
                              dentry->d_inode->i_ctime, p->psi);
+       else
+               return -EPERM;
 
        return simple_unlink(dir, dentry);
 }
@@ -334,9 +336,8 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
 
        mutex_lock(&root->d_inode->i_mutex);
 
-       rc = -ENOSPC;
        dentry = d_alloc_name(root, name);
-       if (IS_ERR(dentry))
+       if (!dentry)
                goto fail_lockedalloc;
 
        memcpy(private->data, data, size);
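The pstore hunk above matters because d_alloc_name() reports failure with a NULL pointer, not an ERR_PTR(), so the old IS_ERR() test could never fire and a failed allocation fell through. A userspace illustration of the two conventions, with stand-ins for the kernel's ERR_PTR()/IS_ERR():

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }
static int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* like d_alloc_name(): NULL on failure, never an ERR_PTR() */
static void *alloc_name(int fail)
{
        return fail ? NULL : (void *)"dentry";
}

int main(void)
{
        void *d = alloc_name(1);

        /* IS_ERR(NULL) is 0, so the old check missed this failure */
        printf("IS_ERR(d)=%d  d==NULL: %d\n", IS_ERR(d), d == NULL);
        printf("IS_ERR(ERR_PTR(-12))=%d\n", IS_ERR(ERR_PTR(-12)));
        return 0;
}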
index c5684c92266ee0209d6a925d5b3830dab551638a..283bda06cd8c19345336d5e76a3e420bc5a20d9d 100644 (file)
@@ -92,6 +92,7 @@ struct ramoops_context {
        struct persistent_ram_ecc_info ecc_info;
        unsigned int max_dump_cnt;
        unsigned int dump_write_cnt;
+       /* the *_read_cnt counters must be reset in ramoops_pstore_open() */
        unsigned int dump_read_cnt;
        unsigned int console_read_cnt;
        unsigned int ftrace_read_cnt;
@@ -107,6 +108,7 @@ static int ramoops_pstore_open(struct pstore_info *psi)
 
        cxt->dump_read_cnt = 0;
        cxt->console_read_cnt = 0;
+       cxt->ftrace_read_cnt = 0;
        return 0;
 }
 
@@ -123,13 +125,15 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
                return NULL;
 
        prz = przs[i];
+       if (!prz)
+               return NULL;
 
-       if (update) {
-               /* Update old/shadowed buffer. */
+       /* Update old/shadowed buffer. */
+       if (update)
                persistent_ram_save_old(prz);
-               if (!persistent_ram_old_size(prz))
-                       return NULL;
-       }
+
+       if (!persistent_ram_old_size(prz))
+               return NULL;
 
        *typep = type;
        *id = i;
@@ -421,7 +425,6 @@ static int ramoops_probe(struct platform_device *pdev)
        if (!is_power_of_2(pdata->ftrace_size))
                pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
 
-       cxt->dump_read_cnt = 0;
        cxt->size = pdata->mem_size;
        cxt->phys_addr = pdata->mem_address;
        cxt->memtype = pdata->mem_type;
index 6ff97553331b8d67192e8c1dba31cef75d3f6eec..bda61a759b684f58e6212064e0dc6307be99a13b 100644 (file)
@@ -46,7 +46,7 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
 }
 
 /* increase and wrap the start pointer, returning the old value */
-static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
 {
        int old;
        int new;
@@ -62,7 +62,7 @@ static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
 }
 
 /* increase the size counter until it hits the max size */
-static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
+static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
 {
        size_t old;
        size_t new;
@@ -78,6 +78,53 @@ static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
        } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
 }
 
+static DEFINE_RAW_SPINLOCK(buffer_lock);
+
+/* increase and wrap the start pointer, returning the old value */
+static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+       int old;
+       int new;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&buffer_lock, flags);
+
+       old = atomic_read(&prz->buffer->start);
+       new = old + a;
+       while (unlikely(new > prz->buffer_size))
+               new -= prz->buffer_size;
+       atomic_set(&prz->buffer->start, new);
+
+       raw_spin_unlock_irqrestore(&buffer_lock, flags);
+
+       return old;
+}
+
+/* increase the size counter until it hits the max size */
+static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+       size_t old;
+       size_t new;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&buffer_lock, flags);
+
+       old = atomic_read(&prz->buffer->size);
+       if (old == prz->buffer_size)
+               goto exit;
+
+       new = old + a;
+       if (new > prz->buffer_size)
+               new = prz->buffer_size;
+       atomic_set(&prz->buffer->size, new);
+
+exit:
+       raw_spin_unlock_irqrestore(&buffer_lock, flags);
+}
+
+static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
+static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
+
 static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
        uint8_t *data, size_t len, uint8_t *ecc)
 {
@@ -379,6 +426,9 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
                return NULL;
        }
 
+       buffer_start_add = buffer_start_add_locked;
+       buffer_size_add = buffer_size_add_locked;
+
        if (memtype)
                va = ioremap(start, size);
        else
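The ram_core changes above default to the cmpxchg-based helpers and swap in the spinlocked variants once the buffer is known to be iomapped, where atomic read-modify-write is not reliable. A minimal sketch of that one-time function-pointer dispatch; the names and the selection condition are illustrative:

#include <stdio.h>

static int add_atomic(int *v, int a) { return *v += a; } /* fast-path stand-in */
static int add_locked(int *v, int a) { return *v += a; } /* spinlocked stand-in */

/* default to the atomic variant, as persistent_ram does */
static int (*buf_add)(int *, int) = add_atomic;

int main(void)
{
        int counter = 0;
        int backing_is_iomem = 1;       /* the ioremap case above */

        if (backing_is_iomem)
                buf_add = add_locked;   /* one-time switch before use */

        buf_add(&counter, 4);
        printf("counter=%d via %s\n", counter,
               buf_add == add_locked ? "locked" : "atomic");
        return 0;
}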
index 650b563836801f41809d1e0170f9ffe0e14d1d3a..9e5453be6ec8d2cbfaf241a8f42c400c07ef1c2d 100644 (file)
@@ -1950,17 +1950,13 @@ static void pool_mayday_timeout(unsigned long __pool)
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.  Called only from
  * manager.
- *
- * RETURNS:
- * %false if no action was taken and pool->lock stayed locked, %true
- * otherwise.
  */
-static bool maybe_create_worker(struct worker_pool *pool)
+static void maybe_create_worker(struct worker_pool *pool)
 __releases(&pool->lock)
 __acquires(&pool->lock)
 {
        if (!need_to_create_worker(pool))
-               return false;
+               return;
 restart:
        spin_unlock_irq(&pool->lock);
 
@@ -1977,7 +1973,7 @@ restart:
                        start_worker(worker);
                        if (WARN_ON_ONCE(need_to_create_worker(pool)))
                                goto restart;
-                       return true;
+                       return;
                }
 
                if (!need_to_create_worker(pool))
@@ -1994,7 +1990,7 @@ restart:
        spin_lock_irq(&pool->lock);
        if (need_to_create_worker(pool))
                goto restart;
-       return true;
+       return;
 }
 
 /**
@@ -2007,15 +2003,9 @@ restart:
  * LOCKING:
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Called only from manager.
- *
- * RETURNS:
- * %false if no action was taken and pool->lock stayed locked, %true
- * otherwise.
  */
-static bool maybe_destroy_workers(struct worker_pool *pool)
+static void maybe_destroy_workers(struct worker_pool *pool)
 {
-       bool ret = false;
-
        while (too_many_workers(pool)) {
                struct worker *worker;
                unsigned long expires;
@@ -2029,10 +2019,7 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
                }
 
                destroy_worker(worker);
-               ret = true;
        }
-
-       return ret;
 }
 
 /**
@@ -2052,13 +2039,14 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
  * multiple times.  Does GFP_KERNEL allocations.
  *
  * RETURNS:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
- * multiple times.  Does GFP_KERNEL allocations.
+ * %false if the pool doesn't need management and the caller can safely
+ * start processing work items, %true if the management function was
+ * performed and the conditions that the caller verified before calling
+ * the function may no longer be true.
  */
 static bool manage_workers(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
-       bool ret = false;
 
        /*
         * Managership is governed by two mutexes - manager_arb and
@@ -2082,7 +2070,7 @@ static bool manage_workers(struct worker *worker)
         * manager_mutex.
         */
        if (!mutex_trylock(&pool->manager_arb))
-               return ret;
+               return false;
 
        /*
         * With manager arbitration won, manager_mutex would be free in
@@ -2092,7 +2080,6 @@ static bool manage_workers(struct worker *worker)
                spin_unlock_irq(&pool->lock);
                mutex_lock(&pool->manager_mutex);
                spin_lock_irq(&pool->lock);
-               ret = true;
        }
 
        pool->flags &= ~POOL_MANAGE_WORKERS;
@@ -2101,12 +2088,12 @@ static bool manage_workers(struct worker *worker)
         * Destroy and then create so that may_start_working() is true
         * on return.
         */
-       ret |= maybe_destroy_workers(pool);
-       ret |= maybe_create_worker(pool);
+       maybe_destroy_workers(pool);
+       maybe_create_worker(pool);
 
        mutex_unlock(&pool->manager_mutex);
        mutex_unlock(&pool->manager_arb);
-       return ret;
+       return true;
 }
 
 /**
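The new return contract documented above shifts a recheck burden to the caller: a true return means management ran and the caller's earlier checks may be stale. A toy model of the caller loop; the pool struct and helpers are invented stand-ins for the workqueue internals:

#include <stdbool.h>
#include <stdio.h>

struct pool { int idle; int pending; };

static bool need_more_worker(struct pool *p)  { return p->pending > 0; }
static bool may_start_working(struct pool *p) { return p->idle > 0; }

/* returns true when it changed pool state, so earlier checks are stale */
static bool manage_workers(struct pool *p)
{
        if (p->idle == 0) {
                p->idle = 1;            /* "created" a worker */
                return true;
        }
        return false;
}

int main(void)
{
        struct pool pool = { .idle = 0, .pending = 1 };

recheck:
        if (need_more_worker(&pool) && !may_start_working(&pool) &&
            manage_workers(&pool))
                goto recheck;           /* state changed under us: re-verify */

        printf("idle=%d pending=%d\n", pool.idle, pool.pending);
        return 0;
}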
index 43ab4b03d306119b5546fe3c8cdae4432cee28b1..a3aa0406f5b4559ac8fffdf3c96ec9916e80d95f 100644 (file)
@@ -2668,6 +2668,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->get_key)
                return -EOPNOTSUPP;
 
+       if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+               return -ENOENT;
+
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
@@ -2687,10 +2690,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
                goto nla_put_failure;
 
-       if (pairwise && mac_addr &&
-           !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
-               return -ENOENT;
-
        err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
                           get_key_callback);
 
@@ -2861,7 +2860,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
        wdev_lock(dev->ieee80211_ptr);
        err = nl80211_key_allowed(dev->ieee80211_ptr);
 
-       if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
+       if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
            !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
                err = -ENOENT;
 
index dbc55071679081568e6e3e89e256d78eccc74b55..f60d81497f282c2900682ef064bf833992cccbec 100644 (file)
@@ -81,36 +81,6 @@ struct snd_seq_dummy_port {
 
 static int my_client = -1;
 
-/*
- * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
- * to subscribers.
- * Note: this callback is called only after all subscribers are removed.
- */
-static int
-dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
-{
-       struct snd_seq_dummy_port *p;
-       int i;
-       struct snd_seq_event ev;
-
-       p = private_data;
-       memset(&ev, 0, sizeof(ev));
-       if (p->duplex)
-               ev.source.port = p->connect;
-       else
-               ev.source.port = p->port;
-       ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
-       ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
-       for (i = 0; i < 16; i++) {
-               ev.data.control.channel = i;
-               ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
-               snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
-               ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
-               snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
-       }
-       return 0;
-}
-
 /*
  * event input callback - just redirect events to subscribers
  */
@@ -175,7 +145,6 @@ create_port(int idx, int type)
                | SNDRV_SEQ_PORT_TYPE_PORT;
        memset(&pcb, 0, sizeof(pcb));
        pcb.owner = THIS_MODULE;
-       pcb.unuse = dummy_unuse;
        pcb.event_input = dummy_input;
        pcb.private_free = dummy_free;
        pcb.private_data = rec;
index 5e5af898f7f837164c4b1df775fafda5bb1cfafa..412d90f7b256250ebee6b1b33f4d95150a3c01c3 100644 (file)
@@ -555,7 +555,7 @@ static struct {
        { 22050, 2 },
        { 24000, 2 },
        { 16000, 3 },
-       { 11250, 4 },
+       { 11025, 4 },
        { 12000, 4 },
        {  8000, 5 },
 };
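On the wm8960 fix above: the corrected entry is 11025 Hz, the standard rate 44100/4; 11250 Hz is not a standard audio rate, which is what marks the old table entry as a typo. A quick check of the derived rates:

#include <stdio.h>

int main(void)
{
        /* the usual sub-rates divide down from 44100 Hz and 48000 Hz */
        printf("44100/2 = %d\n", 44100 / 2);    /* 22050 */
        printf("44100/4 = %d\n", 44100 / 4);    /* 11025, not 11250 */
        printf("48000/4 = %d\n", 48000 / 4);    /* 12000 */
        return 0;
}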