Merge tag 'amlogic-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/khilman...
author Arnd Bergmann <arnd@arndb.de>
Wed, 4 Jan 2017 15:41:18 +0000 (16:41 +0100)
committer Arnd Bergmann <arnd@arndb.de>
Wed, 4 Jan 2017 15:42:00 +0000 (16:42 +0100)
Pull "Amlogic fixes for v4.10" from Kevin Hilman:

- DT: GXL: fix GPIO include
- add DT and defconfig for newly merged DRM driver

This pull has one real fix, as well as a couple of non-critical ones.  The DRM
DT/defconfig patches are coming now because I didn't expect the new
driver to make it for the v4.10 merge window, but since it did[1], the
DT and defconfig should go into the same release.

[1] bbbe775ec5b5 drm: Add support for Amlogic Meson Graphic Controller

* tag 'amlogic-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/khilman/linux-amlogic:
  ARM64: defconfig: enable DRM_MESON as module
  ARM64: dts: meson-gx: Add Graphic Controller nodes
  ARM64: dts: meson-gxl: fix GPIO include
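
As a rough sketch, inferred from the commit subject rather than reproduced from the actual hunk, the defconfig change presumably enables the newly merged Meson DRM driver as a loadable module in arch/arm64/configs/defconfig:

    CONFIG_DRM_MESON=m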

88 files changed:
Documentation/DocBook/Makefile
Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
Documentation/devicetree/bindings/power/supply/tps65217_charger.txt
Documentation/unaligned-memory-access.txt
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-bone-common.dtsi
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am571x-idk.dts
arch/arm/boot/dts/am572x-idk.dts
arch/arm/boot/dts/am57xx-idk-common.dtsi
arch/arm/boot/dts/dm814x.dtsi
arch/arm/boot/dts/dm816x.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra72-evm-tps65917.dtsi
arch/arm/boot/dts/imx31.dtsi
arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sl.dtsi
arch/arm/boot/dts/imx6sx.dtsi
arch/arm/boot/dts/omap2.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/qcom-apq8064.dtsi
arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
arch/arm/mach-exynos/platsmp.c
arch/arm/mach-imx/mach-imx1.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/board-generic.c
arch/arm/mach-omap2/gpio.c [deleted file]
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_common_data.h
arch/arm/mach-omap2/prm_common.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-s3c24xx/common.c
arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
arch/arm64/include/asm/asm-uaccess.h [new file with mode: 0644]
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/entry.S
arch/arm64/lib/clear_user.S
arch/arm64/lib/copy_from_user.S
arch/arm64/lib/copy_in_user.S
arch/arm64/lib/copy_to_user.S
arch/arm64/mm/cache.S
arch/arm64/xen/hypercall.S
arch/x86/include/asm/bitops.h
arch/x86/kernel/cpu/mcheck/mce_amd.c
crypto/testmgr.c
drivers/crypto/marvell/cesa.h
drivers/crypto/marvell/hash.c
drivers/crypto/marvell/tdma.c
drivers/firmware/arm_scpi.c
drivers/firmware/psci_checker.c
drivers/net/ethernet/korina.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ipvlan/ipvlan.h
drivers/net/ipvlan/ipvlan_core.c
drivers/net/ipvlan/ipvlan_main.c
fs/dax.c
fs/ext2/inode.c
fs/ext4/file.c
include/dt-bindings/mfd/tps65217.h [deleted file]
include/linux/dax.h
include/linux/filter.h
include/linux/page-flags.h
include/net/netns/ipv4.h
include/net/tcp.h
kernel/cpu.c
mm/filemap.c
mm/truncate.c
net/core/filter.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_ipv4.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/sched/cls_api.c
net/tipc/socket.c

index c75e5d6b8fa8d48b787eed2a0f926bc36fe0a930..a6eb7dcd4dd5c010fe76ac285769d9e0c5157adc 100644 (file)
@@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml  \
            kernel-api.xml filesystems.xml lsm.xml kgdb.xml \
            gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
            genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
-           80211.xml sh.xml regulator.xml w1.xml \
+           sh.xml regulator.xml w1.xml \
            writing_musb_glue_layer.xml iio.xml
 
 ifeq ($(DOCBOOKS),)
index 3e5b9793341f4e10679f792c8e19e9fbb55027e1..8682ab6d4a50f86d0d352f6b5b4e8a337c5511c4 100644 (file)
@@ -8,8 +8,9 @@ This driver provides a simple power button event via an Interrupt.
 Required properties:
 - compatible: should be "ti,tps65217-pwrbutton" or "ti,tps65218-pwrbutton"
 
-Required properties for TPS65218:
+Required properties:
 - interrupts: should be one of the following
+   - <2>: For controllers compatible with tps65217
    - <3 IRQ_TYPE_EDGE_BOTH>: For controllers compatible with tps65218
 
 Examples:
@@ -17,6 +18,7 @@ Examples:
 &tps {
        tps65217-pwrbutton {
                compatible = "ti,tps65217-pwrbutton";
+               interrupts = <2>;
        };
 };
 
index 98d131acee95dbff1631e63a5c01db39c2b3def1..a11072c5a8660d362958995a6fd27123c296b2fa 100644 (file)
@@ -2,11 +2,16 @@ TPS65217 Charger
 
 Required Properties:
 -compatible: "ti,tps65217-charger"
+-interrupts: TPS65217 interrupt numbers for the AC and USB charger input change.
+             Should be <0> for the USB charger and <1> for the AC adapter.
+-interrupt-names: Should be "USB" and "AC"
 
 This node is a subnode of the tps65217 PMIC.
 
 Example:
 
        tps65217-charger {
-               compatible = "ti,tps65090-charger";
+               compatible = "ti,tps65217-charger";
+               interrupts = <0>, <1>;
+               interrupt-names = "USB", "AC";
        };
index a445da098bc6e5aa733cd55ca2ee8b4a5f04dc2c..3f76c0c379206a72519e864fb5486abf1a75ac0f 100644 (file)
@@ -151,7 +151,7 @@ bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
 #else
        const u16 *a = (const u16 *)addr1;
        const u16 *b = (const u16 *)addr2;
-       return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+       return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
 #endif
 }
 
index cfff2c9e3d9470550fd47dcd7b2638c77121c607..c7397bdef186c7d93374b15f88847f5ee3b1afe2 100644 (file)
@@ -9842,7 +9842,7 @@ M:        Mark Rutland <mark.rutland@arm.com>
 M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 L:     linux-arm-kernel@lists.infradead.org
 S:     Maintained
-F:     drivers/firmware/psci.c
+F:     drivers/firmware/psci*.c
 F:     include/linux/psci.h
 F:     include/uapi/linux/psci.h
 
index ec411ba9e40f98376bb594de459e98413f0dfae9..5470d599384a5ba676a60490e19baf81a1068b65 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Roaring Lionus
 
 # *DOCUMENTATION*
index 5fab553fd03ade24fdc1a69a83e5092b791b8a01..186c4c214e0a756b4468b597d5680093408c28c3 100644 (file)
@@ -1502,8 +1502,7 @@ source kernel/Kconfig.preempt
 
 config HZ_FIXED
        int
-       default 200 if ARCH_EBSA110 || ARCH_S3C24XX || \
-               ARCH_S5PV210 || ARCH_EXYNOS4
+       default 200 if ARCH_EBSA110
        default 128 if SOC_AT91RM9200
        default 0
 
index cccdbcb557b6d29b2e45a0c0b9fb792a469cf1fe..7327250f0bb66e716dacd07d35019c858f38cf68 100644 (file)
@@ -501,6 +501,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
        am3517-evm.dtb \
        am3517_mt_ventoux.dtb \
        logicpd-torpedo-37xx-devkit.dtb \
+       logicpd-som-lv-37xx-devkit.dtb \
        omap3430-sdp.dtb \
        omap3-beagle.dtb \
        omap3-beagle-xm.dtb \
index dc561d505bbe2ce10054c7bc19452b0b4b76fc4f..3e32dd18fd25f5720ca5449ce494c0a3282d4fc4 100644 (file)
@@ -6,8 +6,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <dt-bindings/mfd/tps65217.h>
-
 / {
        cpus {
                cpu@0 {
        ti,pmic-shutdown-controller;
 
        charger {
-               interrupts = <TPS65217_IRQ_AC>, <TPS65217_IRQ_USB>;
-               interrupts-names = "AC", "USB";
+               interrupts = <0>, <1>;
+               interrupt-names = "USB", "AC";
                status = "okay";
        };
 
        pwrbutton {
-               interrupts = <TPS65217_IRQ_PB>;
+               interrupts = <2>;
                status = "okay";
        };
 
index 64c8aa9057a3366b746078a51e662f76a35e8f4b..18d72a245e889ac28561b34417680702fb0e10e1 100644 (file)
@@ -16,6 +16,7 @@
        interrupt-parent = <&intc>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c0;
index ac55f93fc91e997deef1815adeb9d9dee9e2c555..2df9e6050c2f382fea2d09f6a95e634210d3a88f 100644 (file)
@@ -16,6 +16,7 @@
        interrupt-parent = <&wakeupgen>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        memory@0 {
                device_type = "memory";
index d6e43e5184c1829483a3b5b0fb1cc99268a0d611..ad68d1eb3bc3d2a4feec75053323f4ed90d394e4 100644 (file)
                        linux,default-trigger = "mmc0";
                };
        };
-
-       extcon_usb2: extcon_usb2 {
-            compatible = "linux,extcon-usb-gpio";
-            id-gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
-       };
 };
 
 &mmc1 {
@@ -79,3 +74,8 @@
 &omap_dwc3_2 {
        extcon = <&extcon_usb2>;
 };
+
+&extcon_usb2 {
+       id-gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
+       vbus-gpio = <&gpio7 22 GPIO_ACTIVE_HIGH>;
+};
index 27d9149cedba78c81c7025a2404d3f80b14524f6..8350b4b34b085231663c9235b570e487827a5000 100644 (file)
                reg = <0x0 0x80000000 0x0 0x80000000>;
        };
 
-       extcon_usb2: extcon_usb2 {
-               compatible = "linux,extcon-usb-gpio";
-               id-gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>;
-       };
-
        status-leds {
                compatible = "gpio-leds";
                cpu0-led {
        extcon = <&extcon_usb2>;
 };
 
+&extcon_usb2 {
+       id-gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+       vbus-gpio = <&gpio3 26 GPIO_ACTIVE_HIGH>;
+};
+
 &mmc1 {
        status = "okay";
        vmmc-supply = <&v3_3d>;
@@ -87,3 +87,7 @@
 &sn65hvs882 {
        load-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
 };
+
+&pcie1 {
+       gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+};
index 555ae21f2b9adcf2ad811b5541a3f865997bb098..814a720d5c3db5a9d1a5c94719fdf89207c435e2 100644 (file)
                        gpio-controller;
                        #gpio-cells = <2>;
                };
+
+               extcon_usb2: tps659038_usb {
+                       compatible = "ti,palmas-usb-vid";
+                       ti,enable-vbus-detection;
+                       ti,enable-id-detection;
+                       /* ID & VBUS GPIOs provided in board dts */
+               };
        };
 };
 
 };
 
 &usb2 {
-       dr_mode = "otg";
+       dr_mode = "peripheral";
 };
 
 &mmc2 {
index 1facc5f12cef700acf90e0b2f2e8594096a9bf9a..81b8cecb58206d8c98d9d986a1995e67e17fe7ed 100644 (file)
@@ -12,6 +12,7 @@
        interrupt-parent = <&intc>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c1;
index 61dd2f6b02bcfe47d7d6a5ddd5734de0e5687e89..6db652ae9bd558021ac99a6a8bcb954a5c94b806 100644 (file)
@@ -12,6 +12,7 @@
        interrupt-parent = <&intc>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c1;
index addb7530cfbe0dcece696c2e84fbb5ea8896ceb2..1faf24acd521d3cc4ed22d24c8fe0ff469e9a30a 100644 (file)
@@ -18,6 +18,7 @@
 
        compatible = "ti,dra7xx";
        interrupt-parent = <&crossbar_mpu>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c1;
index ee6dac44edf1ada06b65a48d213ec9d8d966119b..e6df676886c0c35052921669039ae31fd446ccbf 100644 (file)
                ti,palmas-long-press-seconds = <6>;
        };
 };
+
+&usb2_phy1 {
+       phy-supply = <&ldo4_reg>;
+};
+
+&usb2_phy2 {
+       phy-supply = <&ldo4_reg>;
+};
+
+&dss {
+       vdda_video-supply = <&ldo5_reg>;
+};
+
+&mmc1 {
+       vmmc_aux-supply = <&ldo1_reg>;
+};
index 685916e3d8a1eaefb083d2a7a81583c18a360168..85cd8be22f7155edae2d56ac5a99984427aa6131 100644 (file)
                };
        };
 
-       avic: avic-interrupt-controller@60000000 {
+       avic: interrupt-controller@68000000 {
                compatible = "fsl,imx31-avic", "fsl,avic";
                interrupt-controller;
                #interrupt-cells = <1>;
-               reg = <0x60000000 0x100000>;
+               reg = <0x68000000 0x100000>;
        };
 
        soc {
index e476d01959ea3f337eb966559c241b3ef82f7fba..26d0604847282879f2286ac9c7242a84b8ac271b 100644 (file)
                                MX6QDL_PAD_SD2_DAT1__SD2_DATA1          0x17071
                                MX6QDL_PAD_SD2_DAT2__SD2_DATA2          0x17071
                                MX6QDL_PAD_SD2_DAT3__SD2_DATA3          0x17071
-                               MX6QDL_PAD_NANDF_CS2__GPIO6_IO15        0x000b0
                        >;
                };
 
index 53e6e63cbb0235d634f108f12ad1fd75001817dd..89b834f3fa17f6b576e31fd61e8bfad205f73923 100644 (file)
                                interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6QDL_CLK_EIM_SLOW>;
                                fsl,weim-cs-gpr = <&gpr>;
+                               status = "disabled";
                        };
 
                        ocotp: ocotp@021bc000 {
index 4fd6de29f07db21ba1c4552b8d4e37c59858699f..19cbd879c448984717a83e1d819efdec822c4957 100644 (file)
                                reg = <0x021b8000 0x4000>;
                                interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
                                fsl,weim-cs-gpr = <&gpr>;
+                               status = "disabled";
                        };
 
                        ocotp: ocotp@021bc000 {
index 076a30f9bcae26d8e62b119ee92fd71d1944e922..10f33301619777a9d676eb49bae893540d920973 100644 (file)
                                interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6SX_CLK_EIM_SLOW>;
                                fsl,weim-cs-gpr = <&gpr>;
+                               status = "disabled";
                        };
 
                        ocotp: ocotp@021bc000 {
index 4f793a025a721b03f3a5e1f074c4c531033cf62f..f1d6de8b3c193eee0d88b0465f33de4e122ba266 100644 (file)
@@ -17,6 +17,7 @@
        interrupt-parent = <&intc>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        aliases {
                serial0 = &uart1;
index 87ca50b53002b9cf244b9dca24a0f2fbdca3dabc..4d448f145ed1c2941bda6d76e91d7573c0a37568 100644 (file)
        vmmc_aux-supply = <&vsim>;
        bus-width = <8>;
        non-removable;
+       no-sdio;
+       no-sd;
 };
 
 &mmc3 {
index ecf5eb584c75058598b9c90bc3f3568bf3a7ce7e..a3ff4933dbc173936bbec5a222aba6642e70564d 100644 (file)
@@ -17,6 +17,7 @@
        interrupt-parent = <&intc>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c1;
index 8087456b5fbec60c9379e5937b299ce820a7cb03..578c53f08309090069454261e09bb0f1617e4a98 100644 (file)
@@ -15,6 +15,7 @@
        interrupt-parent = <&wakeupgen>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c1;
index 968c67a49dbd158b3b3ca24ad2bd85b6f45687d6..7cd92babc41a688cf8bb474972d053ea7b8b33d6 100644 (file)
@@ -17,6 +17,7 @@
 
        compatible = "ti,omap5";
        interrupt-parent = <&wakeupgen>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c1;
index 268bd470c865e6022d907ca495b6913acfd7c1ef..407a4610f4a7e055a488defe3fc52c05f1b3fa63 100644 (file)
@@ -4,6 +4,7 @@
 #include <dt-bindings/clock/qcom,gcc-msm8960.h>
 #include <dt-bindings/reset/qcom,gcc-msm8960.h>
 #include <dt-bindings/clock/qcom,mmcc-msm8960.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
 #include <dt-bindings/soc/qcom,gsbi.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
        firmware {
                scm {
                        compatible = "qcom,scm-apq8064";
+
+                       clocks = <&rpmcc RPM_DAYTONA_FABRIC_CLK>;
+                       clock-names = "core";
                };
        };
 
index 102838fcc5880ca0f4fbb90a23098191004d9386..15f4fd3f469561caa7fba30fed8c6a10c18c6668 100644 (file)
@@ -81,7 +81,7 @@
                #address-cells = <0>;
                interrupt-controller;
                reg = <0 0x2c001000 0 0x1000>,
-                     <0 0x2c002000 0 0x1000>,
+                     <0 0x2c002000 0 0x2000>,
                      <0 0x2c004000 0 0x2000>,
                      <0 0x2c006000 0 0x2000>;
                interrupts = <1 9 0xf04>;
index 45d08cc37b0134c71b11bb580fb574386b46c47b..bd107c5a02267f33a02e31e0e5fd116672684d73 100644 (file)
                #address-cells = <0>;
                interrupt-controller;
                reg = <0 0x2c001000 0 0x1000>,
-                     <0 0x2c002000 0 0x1000>,
+                     <0 0x2c002000 0 0x2000>,
                      <0 0x2c004000 0 0x2000>,
                      <0 0x2c006000 0 0x2000>;
                interrupts = <1 9 0xf04>;
index 7ea617e47fe41123193bc10a9cb331d9687e015a..958b4c42d32040105fd545ee2e62ceaadb0b682c 100644 (file)
                                        switch0phy1: switch1phy0@1 {
                                                reg = <1>;
                                                interrupt-parent = <&switch0>;
-                                               interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;                                   };
+                                               interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+                                       };
                                        switch0phy2: switch1phy0@2 {
                                                reg = <2>;
                                                interrupt-parent = <&switch0>;
index 98ffe1e62ad5d6debe7c087743d728d5730c26d3..a5d68411a037994cfcf7f3c2b62c0afb5d91617f 100644 (file)
@@ -385,36 +385,6 @@ fail:
        return pen_release != -1 ? ret : 0;
 }
 
-/*
- * Initialise the CPU possible map early - this describes the CPUs
- * which may be present or become present in the system.
- */
-
-static void __init exynos_smp_init_cpus(void)
-{
-       void __iomem *scu_base = scu_base_addr();
-       unsigned int i, ncores;
-
-       if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
-               ncores = scu_base ? scu_get_core_count(scu_base) : 1;
-       else
-               /*
-                * CPU Nodes are passed thru DT and set_cpu_possible
-                * is set by "arm_dt_init_cpu_maps".
-                */
-               return;
-
-       /* sanity check */
-       if (ncores > nr_cpu_ids) {
-               pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
-                       ncores, nr_cpu_ids);
-               ncores = nr_cpu_ids;
-       }
-
-       for (i = 0; i < ncores; i++)
-               set_cpu_possible(i, true);
-}
-
 static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
 {
        int i;
@@ -479,7 +449,6 @@ static void exynos_cpu_die(unsigned int cpu)
 #endif /* CONFIG_HOTPLUG_CPU */
 
 const struct smp_operations exynos_smp_ops __initconst = {
-       .smp_init_cpus          = exynos_smp_init_cpus,
        .smp_prepare_cpus       = exynos_smp_prepare_cpus,
        .smp_secondary_init     = exynos_secondary_init,
        .smp_boot_secondary     = exynos_boot_secondary,
index de5ab8d88549de87ea88fc92c2bf4eda7a022e38..3a8406e45b65dceedf0abdb02f59b4b30bf394a8 100644 (file)
@@ -37,7 +37,6 @@ static const char * const imx1_dt_board_compat[] __initconst = {
 };
 
 DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)")
-       .map_io         = debug_ll_io_init,
        .init_early     = imx1_init_early,
        .init_irq       = imx1_init_irq,
        .dt_compat      = imx1_dt_board_compat,
index 469894082fea00c9605ea4dc370d69b18f5b40d3..093458b62c8dadbcc3c7cc1c3b66d84e59af3d8d 100644 (file)
@@ -7,7 +7,7 @@ ccflags-y := -I$(srctree)/$(src)/include \
 
 # Common support
 obj-y := id.o io.o control.o devices.o fb.o timer.o pm.o \
-        common.o gpio.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
+        common.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
         omap_device.o omap-headsmp.o sram.o drm.o
 
 hwmod-common                           = omap_hwmod.o omap_hwmod_reset.o \
index 36d9943205ca4bb7bff163656f7760129c620e3c..dc9e34e670a26f280bdfe7947fa8709ee750465f 100644 (file)
@@ -304,7 +304,7 @@ DT_MACHINE_START(AM43_DT, "Generic AM43 (Flattened Device Tree)")
        .init_late      = am43xx_init_late,
        .init_irq       = omap_gic_of_init,
        .init_machine   = omap_generic_init,
-       .init_time      = omap4_local_timer_init,
+       .init_time      = omap3_gptimer_timer_init,
        .dt_compat      = am43_boards_compat,
        .restart        = omap44xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
deleted file mode 100644 (file)
index 7a57714..0000000
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * OMAP2+ specific gpio initialization
- *
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
- *
- * Author:
- *     Charulatha V <charu@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/gpio.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/of.h>
-#include <linux/platform_data/gpio-omap.h>
-
-#include "soc.h"
-#include "omap_hwmod.h"
-#include "omap_device.h"
-#include "omap-pm.h"
-
-#include "powerdomain.h"
-
-static int __init omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
-{
-       struct platform_device *pdev;
-       struct omap_gpio_platform_data *pdata;
-       struct omap_gpio_dev_attr *dev_attr;
-       char *name = "omap_gpio";
-       int id;
-       struct powerdomain *pwrdm;
-
-       /*
-        * extract the device id from name field available in the
-        * hwmod database and use the same for constructing ids for
-        * gpio devices.
-        * CAUTION: Make sure the name in the hwmod database does
-        * not change. If changed, make corresponding change here
-        * or make use of static variable mechanism to handle this.
-        */
-       sscanf(oh->name, "gpio%d", &id);
-
-       pdata = kzalloc(sizeof(struct omap_gpio_platform_data), GFP_KERNEL);
-       if (!pdata) {
-               pr_err("gpio%d: Memory allocation failed\n", id);
-               return -ENOMEM;
-       }
-
-       dev_attr = (struct omap_gpio_dev_attr *)oh->dev_attr;
-       pdata->bank_width = dev_attr->bank_width;
-       pdata->dbck_flag = dev_attr->dbck_flag;
-       pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count;
-       pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL);
-       if (!pdata->regs) {
-               pr_err("gpio%d: Memory allocation failed\n", id);
-               kfree(pdata);
-               return -ENOMEM;
-       }
-
-       switch (oh->class->rev) {
-       case 0:
-               if (id == 1)
-                       /* non-wakeup GPIO pins for OMAP2 Bank1 */
-                       pdata->non_wakeup_gpios = 0xe203ffc0;
-               else if (id == 2)
-                       /* non-wakeup GPIO pins for OMAP2 Bank2 */
-                       pdata->non_wakeup_gpios = 0x08700040;
-               /* fall through */
-
-       case 1:
-               pdata->regs->revision = OMAP24XX_GPIO_REVISION;
-               pdata->regs->direction = OMAP24XX_GPIO_OE;
-               pdata->regs->datain = OMAP24XX_GPIO_DATAIN;
-               pdata->regs->dataout = OMAP24XX_GPIO_DATAOUT;
-               pdata->regs->set_dataout = OMAP24XX_GPIO_SETDATAOUT;
-               pdata->regs->clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT;
-               pdata->regs->irqstatus = OMAP24XX_GPIO_IRQSTATUS1;
-               pdata->regs->irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2;
-               pdata->regs->irqenable = OMAP24XX_GPIO_IRQENABLE1;
-               pdata->regs->irqenable2 = OMAP24XX_GPIO_IRQENABLE2;
-               pdata->regs->set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1;
-               pdata->regs->clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1;
-               pdata->regs->debounce = OMAP24XX_GPIO_DEBOUNCE_VAL;
-               pdata->regs->debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN;
-               pdata->regs->ctrl = OMAP24XX_GPIO_CTRL;
-               pdata->regs->wkup_en = OMAP24XX_GPIO_WAKE_EN;
-               pdata->regs->leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0;
-               pdata->regs->leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1;
-               pdata->regs->risingdetect = OMAP24XX_GPIO_RISINGDETECT;
-               pdata->regs->fallingdetect = OMAP24XX_GPIO_FALLINGDETECT;
-               break;
-       case 2:
-               pdata->regs->revision = OMAP4_GPIO_REVISION;
-               pdata->regs->direction = OMAP4_GPIO_OE;
-               pdata->regs->datain = OMAP4_GPIO_DATAIN;
-               pdata->regs->dataout = OMAP4_GPIO_DATAOUT;
-               pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT;
-               pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT;
-               pdata->regs->irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0;
-               pdata->regs->irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1;
-               pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0;
-               pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1;
-               pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0;
-               pdata->regs->irqenable2 = OMAP4_GPIO_IRQSTATUSSET1;
-               pdata->regs->set_irqenable = OMAP4_GPIO_IRQSTATUSSET0;
-               pdata->regs->clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0;
-               pdata->regs->debounce = OMAP4_GPIO_DEBOUNCINGTIME;
-               pdata->regs->debounce_en = OMAP4_GPIO_DEBOUNCENABLE;
-               pdata->regs->ctrl = OMAP4_GPIO_CTRL;
-               pdata->regs->wkup_en = OMAP4_GPIO_IRQWAKEN0;
-               pdata->regs->leveldetect0 = OMAP4_GPIO_LEVELDETECT0;
-               pdata->regs->leveldetect1 = OMAP4_GPIO_LEVELDETECT1;
-               pdata->regs->risingdetect = OMAP4_GPIO_RISINGDETECT;
-               pdata->regs->fallingdetect = OMAP4_GPIO_FALLINGDETECT;
-               break;
-       default:
-               WARN(1, "Invalid gpio bank_type\n");
-               kfree(pdata->regs);
-               kfree(pdata);
-               return -EINVAL;
-       }
-
-       pwrdm = omap_hwmod_get_pwrdm(oh);
-       pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm);
-
-       pdev = omap_device_build(name, id - 1, oh, pdata, sizeof(*pdata));
-       kfree(pdata);
-
-       if (IS_ERR(pdev)) {
-               WARN(1, "Can't build omap_device for %s:%s.\n",
-                                       name, oh->name);
-               return PTR_ERR(pdev);
-       }
-
-       return 0;
-}
-
-/*
- * gpio_init needs to be done before
- * machine_init functions access gpio APIs.
- * Hence gpio_init is a omap_postcore_initcall.
- */
-static int __init omap2_gpio_init(void)
-{
-       /* If dtb is there, the devices will be created dynamically */
-       if (of_have_populated_dt())
-               return -ENODEV;
-
-       return omap_hwmod_for_each_by_class("gpio", omap2_gpio_dev_init, NULL);
-}
-omap_postcore_initcall(omap2_gpio_init);
index 759e1d45ba25c3977af1d7048bec577062a51b66..e8b988714a09f842c01b5ea8a22d09c31ba20f1d 100644 (file)
@@ -741,14 +741,14 @@ static int _init_main_clk(struct omap_hwmod *oh)
        int ret = 0;
        char name[MOD_CLK_MAX_NAME_LEN];
        struct clk *clk;
+       static const char modck[] = "_mod_ck";
 
-       /* +7 magic comes from '_mod_ck' suffix */
-       if (strlen(oh->name) + 7 > MOD_CLK_MAX_NAME_LEN)
+       if (strlen(oh->name) >= MOD_CLK_MAX_NAME_LEN - strlen(modck))
                pr_warn("%s: warning: cropping name for %s\n", __func__,
                        oh->name);
 
-       strncpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - 7);
-       strcat(name, "_mod_ck");
+       strlcpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - strlen(modck));
+       strlcat(name, modck, MOD_CLK_MAX_NAME_LEN);
 
        clk = clk_get(NULL, name);
        if (!IS_ERR(clk)) {
index cdfbb44ceb0c4f3059bc6568e2e765304f7e374a..f22e9cb39f4ac16af15020a0990e32dd975a9bd3 100644 (file)
@@ -121,10 +121,6 @@ extern struct omap_hwmod_irq_info omap2_uart3_mpu_irqs[];
 extern struct omap_hwmod_irq_info omap2_dispc_irqs[];
 extern struct omap_hwmod_irq_info omap2_i2c1_mpu_irqs[];
 extern struct omap_hwmod_irq_info omap2_i2c2_mpu_irqs[];
-extern struct omap_hwmod_irq_info omap2_gpio1_irqs[];
-extern struct omap_hwmod_irq_info omap2_gpio2_irqs[];
-extern struct omap_hwmod_irq_info omap2_gpio3_irqs[];
-extern struct omap_hwmod_irq_info omap2_gpio4_irqs[];
 extern struct omap_hwmod_irq_info omap2_dma_system_irqs[];
 extern struct omap_hwmod_irq_info omap2_mcspi1_mpu_irqs[];
 extern struct omap_hwmod_irq_info omap2_mcspi2_mpu_irqs[];
index 5b2f5138d938ac626a1895b71a9a44cd0d757785..2b138b65129a5d609ff2ae02a55181c10154113b 100644 (file)
@@ -295,10 +295,8 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
                GFP_KERNEL);
 
        if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
-           !prcm_irq_setup->priority_mask) {
-               pr_err("PRCM: kzalloc failed\n");
+           !prcm_irq_setup->priority_mask)
                goto err;
-       }
 
        memset(mask, 0, sizeof(mask));
 
index 56128da23c3a1531994e37522a05e625764cc6ee..07dd692c47372f8aa145d00cb5532b75774429c5 100644 (file)
@@ -510,18 +510,19 @@ void __init omap3_secure_sync32k_timer_init(void)
 }
 #endif /* CONFIG_ARCH_OMAP3 */
 
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX)
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX) || \
+       defined(CONFIG_SOC_AM43XX)
 void __init omap3_gptimer_timer_init(void)
 {
        __omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
                        1, "timer_sys_ck", "ti,timer-alwon", true);
-
-       clocksource_probe();
+       if (of_have_populated_dt())
+               clocksource_probe();
 }
 #endif
 
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) ||         \
-       defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
+       defined(CONFIG_SOC_DRA7XX)
 static void __init omap4_sync32k_timer_init(void)
 {
        __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
index f6c3f151d0d48c2c62ca056a18fbbe16edc309d9..b59f4f4f256f2bd6785b086c40bbefb0bb68315c 100644 (file)
@@ -345,10 +345,40 @@ static struct s3c24xx_dma_channel s3c2410_dma_channels[DMACH_MAX] = {
        [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), },
 };
 
+static const struct dma_slave_map s3c2410_dma_slave_map[] = {
+       { "s3c2410-sdi", "rx-tx", (void *)DMACH_SDI },
+       { "s3c2410-spi.0", "rx", (void *)DMACH_SPI0_RX },
+       { "s3c2410-spi.0", "tx", (void *)DMACH_SPI0_TX },
+       { "s3c2410-spi.1", "rx", (void *)DMACH_SPI1_RX },
+       { "s3c2410-spi.1", "tx", (void *)DMACH_SPI1_TX },
+       /*
+        * The DMA request source[1] (DMACH_UARTx_SRC2) is
+        * not used in the UART driver.
+        */
+       { "s3c2410-uart.0", "rx", (void *)DMACH_UART0 },
+       { "s3c2410-uart.0", "tx", (void *)DMACH_UART0 },
+       { "s3c2410-uart.1", "rx", (void *)DMACH_UART1 },
+       { "s3c2410-uart.1", "tx", (void *)DMACH_UART1 },
+       { "s3c2410-uart.2", "rx", (void *)DMACH_UART2 },
+       { "s3c2410-uart.2", "tx", (void *)DMACH_UART2 },
+       { "s3c24xx-iis", "rx", (void *)DMACH_I2S_IN },
+       { "s3c24xx-iis", "tx", (void *)DMACH_I2S_OUT },
+       { "s3c-hsudc", "rx0", (void *)DMACH_USB_EP1 },
+       { "s3c-hsudc", "tx0", (void *)DMACH_USB_EP1 },
+       { "s3c-hsudc", "rx1", (void *)DMACH_USB_EP2 },
+       { "s3c-hsudc", "tx1", (void *)DMACH_USB_EP2 },
+       { "s3c-hsudc", "rx2", (void *)DMACH_USB_EP3 },
+       { "s3c-hsudc", "tx2", (void *)DMACH_USB_EP3 },
+       { "s3c-hsudc", "rx3", (void *)DMACH_USB_EP4 },
+       { "s3c-hsudc", "tx3", (void *)DMACH_USB_EP4 }
+};
+
 static struct s3c24xx_dma_platdata s3c2410_dma_platdata = {
        .num_phy_channels = 4,
        .channels = s3c2410_dma_channels,
        .num_channels = DMACH_MAX,
+       .slave_map = s3c2410_dma_slave_map,
+       .slavecnt = ARRAY_SIZE(s3c2410_dma_slave_map),
 };
 
 struct platform_device s3c2410_device_dma = {
@@ -388,10 +418,36 @@ static struct s3c24xx_dma_channel s3c2412_dma_channels[DMACH_MAX] = {
        [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, 16 },
 };
 
+static const struct dma_slave_map s3c2412_dma_slave_map[] = {
+       { "s3c2412-sdi", "rx-tx", (void *)DMACH_SDI },
+       { "s3c2412-spi.0", "rx", (void *)DMACH_SPI0_RX },
+       { "s3c2412-spi.0", "tx", (void *)DMACH_SPI0_TX },
+       { "s3c2412-spi.1", "rx", (void *)DMACH_SPI1_RX },
+       { "s3c2412-spi.1", "tx", (void *)DMACH_SPI1_TX },
+       { "s3c2440-uart.0", "rx", (void *)DMACH_UART0 },
+       { "s3c2440-uart.0", "tx", (void *)DMACH_UART0 },
+       { "s3c2440-uart.1", "rx", (void *)DMACH_UART1 },
+       { "s3c2440-uart.1", "tx", (void *)DMACH_UART1 },
+       { "s3c2440-uart.2", "rx", (void *)DMACH_UART2 },
+       { "s3c2440-uart.2", "tx", (void *)DMACH_UART2 },
+       { "s3c2412-iis", "rx", (void *)DMACH_I2S_IN },
+       { "s3c2412-iis", "tx", (void *)DMACH_I2S_OUT },
+       { "s3c-hsudc", "rx0", (void *)DMACH_USB_EP1 },
+       { "s3c-hsudc", "tx0", (void *)DMACH_USB_EP1 },
+       { "s3c-hsudc", "rx1", (void *)DMACH_USB_EP2 },
+       { "s3c-hsudc", "tx1", (void *)DMACH_USB_EP2 },
+       { "s3c-hsudc", "rx2", (void *)DMACH_USB_EP3 },
+       { "s3c-hsudc", "tx2", (void *)DMACH_USB_EP3 },
+       { "s3c-hsudc", "rx3", (void *)DMACH_USB_EP4 },
+       { "s3c-hsudc", "tx3", (void *)DMACH_USB_EP4 }
+};
+
 static struct s3c24xx_dma_platdata s3c2412_dma_platdata = {
        .num_phy_channels = 4,
        .channels = s3c2412_dma_channels,
        .num_channels = DMACH_MAX,
+       .slave_map = s3c2412_dma_slave_map,
+       .slavecnt = ARRAY_SIZE(s3c2412_dma_slave_map),
 };
 
 struct platform_device s3c2412_device_dma = {
@@ -534,10 +590,30 @@ static struct s3c24xx_dma_channel s3c2443_dma_channels[DMACH_MAX] = {
        [DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, 29 },
 };
 
+static const struct dma_slave_map s3c2443_dma_slave_map[] = {
+       { "s3c2440-sdi", "rx-tx", (void *)DMACH_SDI },
+       { "s3c2443-spi.0", "rx", (void *)DMACH_SPI0_RX },
+       { "s3c2443-spi.0", "tx", (void *)DMACH_SPI0_TX },
+       { "s3c2443-spi.1", "rx", (void *)DMACH_SPI1_RX },
+       { "s3c2443-spi.1", "tx", (void *)DMACH_SPI1_TX },
+       { "s3c2440-uart.0", "rx", (void *)DMACH_UART0 },
+       { "s3c2440-uart.0", "tx", (void *)DMACH_UART0 },
+       { "s3c2440-uart.1", "rx", (void *)DMACH_UART1 },
+       { "s3c2440-uart.1", "tx", (void *)DMACH_UART1 },
+       { "s3c2440-uart.2", "rx", (void *)DMACH_UART2 },
+       { "s3c2440-uart.2", "tx", (void *)DMACH_UART2 },
+       { "s3c2440-uart.3", "rx", (void *)DMACH_UART3 },
+       { "s3c2440-uart.3", "tx", (void *)DMACH_UART3 },
+       { "s3c24xx-iis", "rx", (void *)DMACH_I2S_IN },
+       { "s3c24xx-iis", "tx", (void *)DMACH_I2S_OUT },
+};
+
 static struct s3c24xx_dma_platdata s3c2443_dma_platdata = {
        .num_phy_channels = 6,
        .channels = s3c2443_dma_channels,
        .num_channels = DMACH_MAX,
+       .slave_map = s3c2443_dma_slave_map,
+       .slavecnt = ARRAY_SIZE(s3c2443_dma_slave_map),
 };
 
 struct platform_device s3c2443_device_dma = {
index a852e28a40e17761b5b393b21d9798eb9a80cd8b..a83ed2c6bbf79afd64c24577ca2530e7da6f0b87 100644 (file)
@@ -81,7 +81,7 @@
                #address-cells = <0>;
                interrupt-controller;
                reg = <0x0 0x2c001000 0 0x1000>,
-                     <0x0 0x2c002000 0 0x1000>,
+                     <0x0 0x2c002000 0 0x2000>,
                      <0x0 0x2c004000 0 0x2000>,
                      <0x0 0x2c006000 0 0x2000>;
                interrupts = <1 9 0xf04>;
index 9d1d7ad9b075a314d5777260622d66487c82cf45..29ed6b61c737a7827eb8caae01ef335fdbd222b1 100644 (file)
                        reg = <0x0 0x86000000 0x0 0x200000>;
                        no-map;
                };
+
+               memory@85800000 {
+                       reg = <0x0 0x85800000 0x0 0x800000>;
+                       no-map;
+               };
+
+               memory@86200000 {
+                       reg = <0x0 0x86200000 0x0 0x2600000>;
+                       no-map;
+               };
        };
 
        cpus {
index 6ffb0517421a92f40d43ffa37682bfd825efca25..dbea2c3d8f0c229f05573f94b9318837a8c11300 100644 (file)
                power-source = <3300>;
        };
 
-       sdhi0_pins_uhs: sd0 {
+       sdhi0_pins_uhs: sd0_uhs {
                groups = "sdhi0_data4", "sdhi0_ctrl";
                function = "sdhi0";
                power-source = <1800>;
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
new file mode 100644 (file)
index 0000000..df411f3
--- /dev/null
@@ -0,0 +1,65 @@
+#ifndef __ASM_ASM_UACCESS_H
+#define __ASM_ASM_UACCESS_H
+
+#include <asm/alternative.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/sysreg.h>
+#include <asm/assembler.h>
+
+/*
+ * User access enabling/disabling macros.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       .macro  __uaccess_ttbr0_disable, tmp1
+       mrs     \tmp1, ttbr1_el1                // swapper_pg_dir
+       add     \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
+       msr     ttbr0_el1, \tmp1                // set reserved TTBR0_EL1
+       isb
+       .endm
+
+       .macro  __uaccess_ttbr0_enable, tmp1
+       get_thread_info \tmp1
+       ldr     \tmp1, [\tmp1, #TSK_TI_TTBR0]   // load saved TTBR0_EL1
+       msr     ttbr0_el1, \tmp1                // set the non-PAN TTBR0_EL1
+       isb
+       .endm
+
+       .macro  uaccess_ttbr0_disable, tmp1
+alternative_if_not ARM64_HAS_PAN
+       __uaccess_ttbr0_disable \tmp1
+alternative_else_nop_endif
+       .endm
+
+       .macro  uaccess_ttbr0_enable, tmp1, tmp2
+alternative_if_not ARM64_HAS_PAN
+       save_and_disable_irq \tmp2              // avoid preemption
+       __uaccess_ttbr0_enable \tmp1
+       restore_irq \tmp2
+alternative_else_nop_endif
+       .endm
+#else
+       .macro  uaccess_ttbr0_disable, tmp1
+       .endm
+
+       .macro  uaccess_ttbr0_enable, tmp1, tmp2
+       .endm
+#endif
+
+/*
+ * These macros are no-ops when UAO is present.
+ */
+       .macro  uaccess_disable_not_uao, tmp1
+       uaccess_ttbr0_disable \tmp1
+alternative_if ARM64_ALT_PAN_NOT_UAO
+       SET_PSTATE_PAN(1)
+alternative_else_nop_endif
+       .endm
+
+       .macro  uaccess_enable_not_uao, tmp1, tmp2
+       uaccess_ttbr0_enable \tmp1, \tmp2
+alternative_if ARM64_ALT_PAN_NOT_UAO
+       SET_PSTATE_PAN(0)
+alternative_else_nop_endif
+       .endm
+
+#endif
index d26750ca6e066b9f0f267025512b3e2bdef13744..46da3ea638bbdfc858161f3e00ac560562692359 100644 (file)
@@ -22,8 +22,6 @@
 #include <asm/kernel-pgtable.h>
 #include <asm/sysreg.h>
 
-#ifndef __ASSEMBLY__
-
 /*
  * User space memory access functions
  */
@@ -424,66 +422,4 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);
 
-#else  /* __ASSEMBLY__ */
-
-#include <asm/assembler.h>
-
-/*
- * User access enabling/disabling macros.
- */
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-       .macro  __uaccess_ttbr0_disable, tmp1
-       mrs     \tmp1, ttbr1_el1                // swapper_pg_dir
-       add     \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
-       msr     ttbr0_el1, \tmp1                // set reserved TTBR0_EL1
-       isb
-       .endm
-
-       .macro  __uaccess_ttbr0_enable, tmp1
-       get_thread_info \tmp1
-       ldr     \tmp1, [\tmp1, #TSK_TI_TTBR0]   // load saved TTBR0_EL1
-       msr     ttbr0_el1, \tmp1                // set the non-PAN TTBR0_EL1
-       isb
-       .endm
-
-       .macro  uaccess_ttbr0_disable, tmp1
-alternative_if_not ARM64_HAS_PAN
-       __uaccess_ttbr0_disable \tmp1
-alternative_else_nop_endif
-       .endm
-
-       .macro  uaccess_ttbr0_enable, tmp1, tmp2
-alternative_if_not ARM64_HAS_PAN
-       save_and_disable_irq \tmp2              // avoid preemption
-       __uaccess_ttbr0_enable \tmp1
-       restore_irq \tmp2
-alternative_else_nop_endif
-       .endm
-#else
-       .macro  uaccess_ttbr0_disable, tmp1
-       .endm
-
-       .macro  uaccess_ttbr0_enable, tmp1, tmp2
-       .endm
-#endif
-
-/*
- * These macros are no-ops when UAO is present.
- */
-       .macro  uaccess_disable_not_uao, tmp1
-       uaccess_ttbr0_disable \tmp1
-alternative_if ARM64_ALT_PAN_NOT_UAO
-       SET_PSTATE_PAN(1)
-alternative_else_nop_endif
-       .endm
-
-       .macro  uaccess_enable_not_uao, tmp1, tmp2
-       uaccess_ttbr0_enable \tmp1, \tmp2
-alternative_if ARM64_ALT_PAN_NOT_UAO
-       SET_PSTATE_PAN(0)
-alternative_else_nop_endif
-       .endm
-
-#endif /* __ASSEMBLY__ */
-
 #endif /* __ASM_UACCESS_H */
index a7504f40d7ee45f7d8a2ac3dcc9bcbff397eb708..923841ffe4a981669be9bea76d09540244550a1f 100644 (file)
@@ -31,7 +31,7 @@
 #include <asm/memory.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 
 /*
index add4a1334085b1eef342155089bb3d2c4b560621..e88fb99c15616397e6c4bbc0dd9c26e504688d97 100644 (file)
@@ -17,7 +17,7 @@
  */
 #include <linux/linkage.h>
 
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
 
        .text
 
index fd6cd05593f99ea8bff2e7bdae6168d5639067b0..4b5d826895ff161347dec9dd48710d08bcc49699 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/linkage.h>
 
 #include <asm/cache.h>
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
 
 /*
  * Copy from user space to a kernel buffer (alignment handled by the hardware)
index d828540ded6facff4c578c95c75a4163b584184d..47184c3a97da6faa4cce2e31b37f91d95d361ee6 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/linkage.h>
 
 #include <asm/cache.h>
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
 
 /*
  * Copy from user space to user space (alignment handled by the hardware)
index 3e6ae2663b8221033eee04a0667b04f43f84db66..351f0766f7a61c54a47427c6a6845521fda6066d 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/linkage.h>
 
 #include <asm/cache.h>
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
 
 /*
  * Copy to user space from a kernel buffer (alignment handled by the hardware)
index 17f422a4dc5538a534b885da74e85ea251e25a3a..83c27b6e6dca31007d3b9ae5bc1c48d66456e757 100644 (file)
@@ -23,7 +23,7 @@
 #include <asm/assembler.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
 
 /*
  *     flush_icache_range(start,end)
index 47cf3f9d89ffffb13ce020b9197d4ad962892681..947830a459d2ca6ef83fb92a4d168b55ea5c659d 100644 (file)
@@ -49,7 +49,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
 #include <xen/interface/xen.h>
 
 
index 68557f52b9619ddfed7681fc43c2d0530c625f0f..854022772c5be4d49d2697bd2b66b454f49c9e6f 100644 (file)
@@ -139,6 +139,19 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
        asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
 
+static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+{
+       bool negative;
+       asm volatile(LOCK_PREFIX "andb %2,%1\n\t"
+               CC_SET(s)
+               : CC_OUT(s) (negative), ADDR
+               : "ir" ((char) ~(1 << nr)) : "memory");
+       return negative;
+}
+
+// Let everybody know we have it
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+
 /*
  * __clear_bit_unlock - Clears a bit in memory
  * @nr: Bit to clear
index ffacfdcacb85eafd145228d0e0d3e4765b2177ea..a5fd137417a27278f49f3cf0b60803997176a665 100644 (file)
@@ -1182,6 +1182,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
        const char *name = get_name(bank, NULL);
        int err = 0;
 
+       if (!dev)
+               return -ENODEV;
+
        if (is_shared_bank(bank)) {
                nb = node_to_amd_nb(amd_get_nb_id(cpu));
 
index f616ad74cce756fb2d0a0657d153483ed05f56d7..44e888b0b041944b44e8aa5d75619e51d7fe52a2 100644 (file)
@@ -1461,16 +1461,25 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
        for (i = 0; i < ctcount; i++) {
                unsigned int dlen = COMP_BUF_SIZE;
                int ilen = ctemplate[i].inlen;
+               void *input_vec;
 
+               input_vec = kmalloc(ilen, GFP_KERNEL);
+               if (!input_vec) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               memcpy(input_vec, ctemplate[i].input, ilen);
                memset(output, 0, dlen);
                init_completion(&result.completion);
-               sg_init_one(&src, ctemplate[i].input, ilen);
+               sg_init_one(&src, input_vec, ilen);
                sg_init_one(&dst, output, dlen);
 
                req = acomp_request_alloc(tfm);
                if (!req) {
                        pr_err("alg: acomp: request alloc failed for %s\n",
                               algo);
+                       kfree(input_vec);
                        ret = -ENOMEM;
                        goto out;
                }
@@ -1483,6 +1492,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                if (ret) {
                        pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
                               i + 1, algo, -ret);
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
@@ -1491,6 +1501,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                        pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
                               i + 1, algo, req->dlen);
                        ret = -EINVAL;
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
@@ -1500,26 +1511,37 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                               i + 1, algo);
                        hexdump(output, req->dlen);
                        ret = -EINVAL;
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
 
+               kfree(input_vec);
                acomp_request_free(req);
        }
 
        for (i = 0; i < dtcount; i++) {
                unsigned int dlen = COMP_BUF_SIZE;
                int ilen = dtemplate[i].inlen;
+               void *input_vec;
+
+               input_vec = kmalloc(ilen, GFP_KERNEL);
+               if (!input_vec) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
 
+               memcpy(input_vec, dtemplate[i].input, ilen);
                memset(output, 0, dlen);
                init_completion(&result.completion);
-               sg_init_one(&src, dtemplate[i].input, ilen);
+               sg_init_one(&src, input_vec, ilen);
                sg_init_one(&dst, output, dlen);
 
                req = acomp_request_alloc(tfm);
                if (!req) {
                        pr_err("alg: acomp: request alloc failed for %s\n",
                               algo);
+                       kfree(input_vec);
                        ret = -ENOMEM;
                        goto out;
                }
@@ -1532,6 +1554,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                if (ret) {
                        pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
                               i + 1, algo, -ret);
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
@@ -1540,6 +1563,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                        pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
                               i + 1, algo, req->dlen);
                        ret = -EINVAL;
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
@@ -1549,10 +1573,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                               i + 1, algo);
                        hexdump(output, req->dlen);
                        ret = -EINVAL;
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
 
+               kfree(input_vec);
                acomp_request_free(req);
        }
 
index a768da7138a1cd4a0a79771ad99b2f5436f481c3..b7872f62f67475fdc6889420c38de4f8418ffafd 100644 (file)
@@ -273,7 +273,8 @@ struct mv_cesa_op_ctx {
 #define CESA_TDMA_SRC_IN_SRAM                  BIT(30)
 #define CESA_TDMA_END_OF_REQ                   BIT(29)
 #define CESA_TDMA_BREAK_CHAIN                  BIT(28)
-#define CESA_TDMA_TYPE_MSK                     GENMASK(27, 0)
+#define CESA_TDMA_SET_STATE                    BIT(27)
+#define CESA_TDMA_TYPE_MSK                     GENMASK(26, 0)
 #define CESA_TDMA_DUMMY                                0
 #define CESA_TDMA_DATA                         1
 #define CESA_TDMA_OP                           2
index 317cf029c0cf1beab3f2b5f0ab41cf187626e24b..77c0fb936f4794363f478b7bc0b8d23dd10d880f 100644 (file)
@@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
        sreq->offset = 0;
 }
 
+static void mv_cesa_ahash_dma_step(struct ahash_request *req)
+{
+       struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+       struct mv_cesa_req *base = &creq->base;
+
+       /* We must explicitly set the digest state. */
+       if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
+               struct mv_cesa_engine *engine = base->engine;
+               int i;
+
+               /* Set the hash state in the IVDIG regs. */
+               for (i = 0; i < ARRAY_SIZE(creq->state); i++)
+                       writel_relaxed(creq->state[i], engine->regs +
+                                      CESA_IVDIG(i));
+       }
+
+       mv_cesa_dma_step(base);
+}
+
 static void mv_cesa_ahash_step(struct crypto_async_request *req)
 {
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
 
        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
-               mv_cesa_dma_step(&creq->base);
+               mv_cesa_ahash_dma_step(ahashreq);
        else
                mv_cesa_ahash_std_step(ahashreq);
 }
@@ -584,12 +603,16 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
        struct mv_cesa_ahash_dma_iter iter;
        struct mv_cesa_op_ctx *op = NULL;
        unsigned int frag_len;
+       bool set_state = false;
        int ret;
        u32 type;
 
        basereq->chain.first = NULL;
        basereq->chain.last = NULL;
 
+       if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
+               set_state = true;
+
        if (creq->src_nents) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
@@ -683,6 +706,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
        if (type != CESA_TDMA_RESULT)
                basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
 
+       if (set_state) {
+               /*
+                * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
+                * let the step logic know that the IVDIG registers should be
+                * explicitly set before launching a TDMA chain.
+                */
+               basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
+       }
+
        return 0;
 
 err_free_tdma:
index 4416b88eca708aff6aadbff76ec04d95f6b40dbb..c76375ff376d39e5dc2e74463a310231b58f4c86 100644 (file)
@@ -109,7 +109,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
                last->next = dreq->chain.first;
                engine->chain.last = dreq->chain.last;
 
-               if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
+               /*
+                * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+                * the last element of the current chain, or if the request
+                * being queued needs the IV regs to be set before launching
+                * the request.
+                */
+               if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
+                   !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
                        last->next_dma = dreq->chain.first->cur_dma;
        }
 }
index 70e13230d8db835ec201590762ea0ac5331f7ca2..9ad0b1934be9a31173ede1ed6c1c3705cc0c1e05 100644 (file)
@@ -721,11 +721,17 @@ static int scpi_sensor_get_value(u16 sensor, u64 *val)
 
        ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
                                &buf, sizeof(buf));
-       if (!ret)
+       if (ret)
+               return ret;
+
+       if (scpi_info->is_legacy)
+               /* only 32-bits supported, hi_val can be junk */
+               *val = le32_to_cpu(buf.lo_val);
+       else
                *val = (u64)le32_to_cpu(buf.hi_val) << 32 |
                        le32_to_cpu(buf.lo_val);
 
-       return ret;
+       return 0;
 }
 
 static int scpi_device_get_power_state(u16 dev_id)
index 44bdb78f837b4d89f2aaef6e7d0764b8c2783d87..29d58feaf67535d0efc470339de02124ed92cd44 100644 (file)
@@ -270,8 +270,7 @@ static int suspend_test_thread(void *arg)
        struct cpuidle_device *dev;
        struct cpuidle_driver *drv;
        /* No need for an actual callback, we just want to wake up the CPU. */
-       struct timer_list wakeup_timer =
-               TIMER_INITIALIZER(dummy_callback, 0, 0);
+       struct timer_list wakeup_timer;
 
        /* Wait for the main thread to give the start signal. */
        wait_for_completion(&suspend_threads_started);
@@ -287,6 +286,7 @@ static int suspend_test_thread(void *arg)
        pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
                cpu, drv->state_count - 1);
 
+       setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
        for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
                int index;
                /*
index cbeea915f02699696072d1da77c724588e5c3b0e..8037426ec50fa4337cd4e0cc40dabb71082fb487 100644 (file)
@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
                                DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
                                &lp->rx_dma_regs->dmasm);
 
-       korina_free_ring(dev);
-
        napi_disable(&lp->napi);
 
+       korina_free_ring(dev);
+
        if (korina_init(dev) < 0) {
                printk(KERN_ERR "%s: cannot restart device\n", dev->name);
                return;
@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
        tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
        writel(tmp, &lp->rx_dma_regs->dmasm);
 
-       korina_free_ring(dev);
-
        napi_disable(&lp->napi);
 
        cancel_work_sync(&lp->restart_task);
 
+       korina_free_ring(dev);
+
        free_irq(lp->rx_irq, dev);
        free_irq(lp->tx_irq, dev);
        free_irq(lp->ovr_irq, dev);
index bcd95533905865d38c80f72ac32e34f8a653a839..edbe200ac2fa4a11ad30bb8f08d8f7e3cb910708 100644 (file)
@@ -1638,7 +1638,8 @@ int mlx4_en_start_port(struct net_device *dev)
 
        /* Configure tx cq's and rings */
        for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
-               u8 num_tx_rings_p_up = t == TX ? priv->num_tx_rings_p_up : 1;
+               u8 num_tx_rings_p_up = t == TX ?
+                       priv->num_tx_rings_p_up : priv->tx_ring_num[t];
 
                for (i = 0; i < priv->tx_ring_num[t]; i++) {
                        /* Configure cq */
index f9b97f5946f8919aa1d8ff63044c796775cd01cc..44389c90056a0f197a97f5d36478ec597f266004 100644 (file)
@@ -326,6 +326,7 @@ enum cfg_version {
 static const struct pci_device_id rtl8169_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8129), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8136), 0, 0, RTL_CFG_2 },
+       { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8161), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8167), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8168), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8169), 0, 0, RTL_CFG_0 },
index fda01f770eff4239cafaf12e940ee23a07ecb43f..b0344c21375292fe3489a4872400be6535785491 100644 (file)
@@ -116,7 +116,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
        unsigned int mii_address = priv->hw->mii.addr;
        unsigned int mii_data = priv->hw->mii.data;
 
-       u32 value = MII_WRITE | MII_BUSY;
+       u32 value = MII_BUSY;
 
        value |= (phyaddr << priv->hw->mii.addr_shift)
                & priv->hw->mii.addr_mask;
@@ -126,6 +126,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
                & priv->hw->mii.clk_csr_mask;
        if (priv->plat->has_gmac4)
                value |= MII_GMAC4_WRITE;
+       else
+               value |= MII_WRITE;
 
        /* Wait until any existing MII operation is complete */
        if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
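
The stmmac change makes the legacy MII_WRITE bit and the GMAC4 write encoding mutually exclusive instead of OR-ing MII_WRITE unconditionally. A toy, runnable version of that flag selection (the bit positions are placeholders, not the real MAC register layout):

/*
 * Toy version of the conditional command-word build above; the bit
 * positions are placeholders, not the real MAC register layout.
 */
#include <stdint.h>
#include <stdio.h>

#define MII_BUSY	(1u << 0)
#define MII_WRITE	(1u << 1)	/* legacy cores only */
#define MII_GMAC4_WRITE	(1u << 2)	/* GMAC4 uses its own encoding */

static uint32_t build_mdio_write_cmd(int has_gmac4)
{
	uint32_t value = MII_BUSY;

	/* exactly one write encoding, chosen by the core variant */
	if (has_gmac4)
		value |= MII_GMAC4_WRITE;
	else
		value |= MII_WRITE;
	return value;
}

int main(void)
{
	printf("gmac4=0 -> 0x%x, gmac4=1 -> 0x%x\n",
	       (unsigned)build_mdio_write_cmd(0),
	       (unsigned)build_mdio_write_cmd(1));
	return 0;
}
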
index 031093e1c25f55244e6bdfde4ebeb65c0f2f10c1..dbfbb33ac66c2caf05e22d7e4b0bd54629bec635 100644 (file)
@@ -99,6 +99,11 @@ struct ipvl_port {
        int                     count;
 };
 
+struct ipvl_skb_cb {
+       bool tx_pkt;
+};
+#define IPVL_SKB_CB(_skb) ((struct ipvl_skb_cb *)&((_skb)->cb[0]))
+
 static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
 {
        return rcu_dereference(d->rx_handler_data);
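
IPVL_SKB_CB() is the usual skb control-block idiom: a small per-subsystem struct overlaid on the skb's fixed cb[] scratch area. A stand-alone illustration of the idiom follows; the struct names and the 48-byte size are defined locally for the demo rather than taken from the kernel headers:

/*
 * Stand-alone illustration of the control-block overlay idiom used by
 * IPVL_SKB_CB() above. The struct names and the 48-byte scratch size are
 * defined locally for the demo rather than taken from the kernel headers.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_skb {
	/* ... the real sk_buff fields would live here ... */
	char cb[48];	/* scratch area owned by whoever currently holds the skb */
};

struct demo_skb_cb {
	bool tx_pkt;	/* what ipvlan records: did the packet come from the TX path? */
};

#define DEMO_SKB_CB(skb) ((struct demo_skb_cb *)&((skb)->cb[0]))

/* the overlay must never outgrow the scratch area */
_Static_assert(sizeof(struct demo_skb_cb) <= sizeof(((struct fake_skb *)0)->cb),
	       "cb overlay too large");

int main(void)
{
	struct fake_skb skb = { { 0 } };

	DEMO_SKB_CB(&skb)->tx_pkt = true;			/* producer tags the packet */
	printf("tx_pkt=%d\n", DEMO_SKB_CB(&skb)->tx_pkt);	/* consumer reads it back */
	return 0;
}
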
index b4e990743e1da0cb3a946f5473d02cce7447bd1a..83ce74acf82d5e08676cb0d796aac1fe1b524071 100644 (file)
@@ -198,7 +198,7 @@ void ipvlan_process_multicast(struct work_struct *work)
        unsigned int mac_hash;
        int ret;
        u8 pkt_type;
-       bool hlocal, dlocal;
+       bool tx_pkt;
 
        __skb_queue_head_init(&list);
 
@@ -207,8 +207,11 @@ void ipvlan_process_multicast(struct work_struct *work)
        spin_unlock_bh(&port->backlog.lock);
 
        while ((skb = __skb_dequeue(&list)) != NULL) {
+               struct net_device *dev = skb->dev;
+               bool consumed = false;
+
                ethh = eth_hdr(skb);
-               hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr);
+               tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
                mac_hash = ipvlan_mac_hash(ethh->h_dest);
 
                if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
@@ -216,41 +219,45 @@ void ipvlan_process_multicast(struct work_struct *work)
                else
                        pkt_type = PACKET_MULTICAST;
 
-               dlocal = false;
                rcu_read_lock();
                list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
-                       if (hlocal && (ipvlan->dev == skb->dev)) {
-                               dlocal = true;
+                       if (tx_pkt && (ipvlan->dev == skb->dev))
                                continue;
-                       }
                        if (!test_bit(mac_hash, ipvlan->mac_filters))
                                continue;
-
+                       if (!(ipvlan->dev->flags & IFF_UP))
+                               continue;
                        ret = NET_RX_DROP;
                        len = skb->len + ETH_HLEN;
                        nskb = skb_clone(skb, GFP_ATOMIC);
-                       if (!nskb)
-                               goto acct;
-
-                       nskb->pkt_type = pkt_type;
-                       nskb->dev = ipvlan->dev;
-                       if (hlocal)
-                               ret = dev_forward_skb(ipvlan->dev, nskb);
-                       else
-                               ret = netif_rx(nskb);
-acct:
+                       local_bh_disable();
+                       if (nskb) {
+                               consumed = true;
+                               nskb->pkt_type = pkt_type;
+                               nskb->dev = ipvlan->dev;
+                               if (tx_pkt)
+                                       ret = dev_forward_skb(ipvlan->dev, nskb);
+                               else
+                                       ret = netif_rx(nskb);
+                       }
                        ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
+                       local_bh_enable();
                }
                rcu_read_unlock();
 
-               if (dlocal) {
+               if (tx_pkt) {
                        /* If the packet originated here, send it out. */
                        skb->dev = port->dev;
                        skb->pkt_type = pkt_type;
                        dev_queue_xmit(skb);
                } else {
-                       kfree_skb(skb);
+                       if (consumed)
+                               consume_skb(skb);
+                       else
+                               kfree_skb(skb);
                }
+               if (dev)
+                       dev_put(dev);
        }
 }
 
@@ -470,15 +477,24 @@ out:
 }
 
 static void ipvlan_multicast_enqueue(struct ipvl_port *port,
-                                    struct sk_buff *skb)
+                                    struct sk_buff *skb, bool tx_pkt)
 {
        if (skb->protocol == htons(ETH_P_PAUSE)) {
                kfree_skb(skb);
                return;
        }
 
+       /* Record whether the deferred packet came from the TX or the RX
+        * path. Deciding this by looking at the packet's MAC addresses
+        * would lead to erroneous decisions (e.g. when the master device
+        * is in loopback mode, or when the switch is in hairpin mode).
+        */
+       IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;
+
        spin_lock(&port->backlog.lock);
        if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
+               if (skb->dev)
+                       dev_hold(skb->dev);
                __skb_queue_tail(&port->backlog, skb);
                spin_unlock(&port->backlog.lock);
                schedule_work(&port->wq);
@@ -537,7 +553,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
 
        } else if (is_multicast_ether_addr(eth->h_dest)) {
                ipvlan_skb_crossing_ns(skb, NULL);
-               ipvlan_multicast_enqueue(ipvlan->port, skb);
+               ipvlan_multicast_enqueue(ipvlan->port, skb, true);
                return NET_XMIT_SUCCESS;
        }
 
@@ -634,7 +650,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
                         */
                        if (nskb) {
                                ipvlan_skb_crossing_ns(nskb, NULL);
-                               ipvlan_multicast_enqueue(port, nskb);
+                               ipvlan_multicast_enqueue(port, nskb, false);
                        }
                }
        } else {
index 693ec5b6622233cd3a28c64c11d6abb97585318b..8b0f99300cbc97d8c8b93c3dfa99cd841914c086 100644 (file)
@@ -135,6 +135,7 @@ err:
 static void ipvlan_port_destroy(struct net_device *dev)
 {
        struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
+       struct sk_buff *skb;
 
        dev->priv_flags &= ~IFF_IPVLAN_MASTER;
        if (port->mode == IPVLAN_MODE_L3S) {
@@ -144,7 +145,11 @@ static void ipvlan_port_destroy(struct net_device *dev)
        }
        netdev_rx_handler_unregister(dev);
        cancel_work_sync(&port->wq);
-       __skb_queue_purge(&port->backlog);
+       while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
+               if (skb->dev)
+                       dev_put(skb->dev);
+               kfree_skb(skb);
+       }
        kfree(port);
 }
 
index a8732fbed381a45bbce44fcdf0731ccfdc1a09ba..5c74f60d0a5094dc0a27f27ae0acd41667414332 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -451,16 +451,37 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
                __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
 }
 
+static int __dax_invalidate_mapping_entry(struct address_space *mapping,
+                                         pgoff_t index, bool trunc)
+{
+       int ret = 0;
+       void *entry;
+       struct radix_tree_root *page_tree = &mapping->page_tree;
+
+       spin_lock_irq(&mapping->tree_lock);
+       entry = get_unlocked_mapping_entry(mapping, index, NULL);
+       if (!entry || !radix_tree_exceptional_entry(entry))
+               goto out;
+       if (!trunc &&
+           (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
+            radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
+               goto out;
+       radix_tree_delete(page_tree, index);
+       mapping->nrexceptional--;
+       ret = 1;
+out:
+       put_unlocked_mapping_entry(mapping, index, entry);
+       spin_unlock_irq(&mapping->tree_lock);
+       return ret;
+}
 /*
  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
  * entry to get unlocked before deleting it.
  */
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 {
-       void *entry;
+       int ret = __dax_invalidate_mapping_entry(mapping, index, true);
 
-       spin_lock_irq(&mapping->tree_lock);
-       entry = get_unlocked_mapping_entry(mapping, index, NULL);
        /*
         * This gets called from truncate / punch_hole path. As such, the caller
         * must hold locks protecting against concurrent modifications of the
@@ -468,16 +489,46 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
         * caller has seen exceptional entry for this index, we better find it
         * at that index as well...
         */
-       if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
-               spin_unlock_irq(&mapping->tree_lock);
-               return 0;
-       }
-       radix_tree_delete(&mapping->page_tree, index);
+       WARN_ON_ONCE(!ret);
+       return ret;
+}
+
+/*
+ * Invalidate exceptional DAX entry if easily possible. This handles DAX
+ * entries for invalidate_inode_pages() so we evict the entry only if we can
+ * do so without blocking.
+ */
+int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
+{
+       int ret = 0;
+       void *entry, **slot;
+       struct radix_tree_root *page_tree = &mapping->page_tree;
+
+       spin_lock_irq(&mapping->tree_lock);
+       entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
+       if (!entry || !radix_tree_exceptional_entry(entry) ||
+           slot_locked(mapping, slot))
+               goto out;
+       if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
+           radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
+               goto out;
+       radix_tree_delete(page_tree, index);
        mapping->nrexceptional--;
+       ret = 1;
+out:
        spin_unlock_irq(&mapping->tree_lock);
-       dax_wake_mapping_entry_waiter(mapping, index, entry, true);
+       if (ret)
+               dax_wake_mapping_entry_waiter(mapping, index, entry, true);
+       return ret;
+}
 
-       return 1;
+/*
+ * Invalidate exceptional DAX entry if it is clean.
+ */
+int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
+                                     pgoff_t index)
+{
+       return __dax_invalidate_mapping_entry(mapping, index, false);
 }
 
 /*
@@ -488,15 +539,16 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
  * otherwise it will simply fall out of the page cache under memory
  * pressure without ever having been dirtied.
  */
-static int dax_load_hole(struct address_space *mapping, void *entry,
+static int dax_load_hole(struct address_space *mapping, void **entry,
                         struct vm_fault *vmf)
 {
        struct page *page;
+       int ret;
 
        /* Hole page already exists? Return it...  */
-       if (!radix_tree_exceptional_entry(entry)) {
-               vmf->page = entry;
-               return VM_FAULT_LOCKED;
+       if (!radix_tree_exceptional_entry(*entry)) {
+               page = *entry;
+               goto out;
        }
 
        /* This will replace locked radix tree entry with a hole page */
@@ -504,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
                                   vmf->gfp_mask | __GFP_ZERO);
        if (!page)
                return VM_FAULT_OOM;
+ out:
        vmf->page = page;
-       return VM_FAULT_LOCKED;
+       ret = finish_fault(vmf);
+       vmf->page = NULL;
+       *entry = page;
+       if (!ret) {
+               /* Grab reference for PTE that is now referencing the page */
+               get_page(page);
+               return VM_FAULT_NOPAGE;
+       }
+       return ret;
 }
 
 static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
@@ -934,6 +995,17 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
        if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
                return -EIO;
 
+       /*
+        * A write can allocate a block for an area which has a hole page mapped
+        * into page tables. We have to tear down these mappings so that data
+        * written by write(2) is visible in mmap.
+        */
+       if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
+               invalidate_inode_pages2_range(inode->i_mapping,
+                                             pos >> PAGE_SHIFT,
+                                             (end - 1) >> PAGE_SHIFT);
+       }
+
        while (pos < end) {
                unsigned offset = pos & (PAGE_SIZE - 1);
                struct blk_dax_ctl dax = { 0 };
@@ -992,23 +1064,6 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
        if (iov_iter_rw(iter) == WRITE)
                flags |= IOMAP_WRITE;
 
-       /*
-        * Yes, even DAX files can have page cache attached to them:  A zeroed
-        * page is inserted into the pagecache when we have to serve a write
-        * fault on a hole.  It should never be dirtied and can simply be
-        * dropped from the pagecache once we get real data for the page.
-        *
-        * XXX: This is racy against mmap, and there's nothing we can do about
-        * it. We'll eventually need to shift this down even further so that
-        * we can check if we allocated blocks over a hole first.
-        */
-       if (mapping->nrpages) {
-               ret = invalidate_inode_pages2_range(mapping,
-                               pos >> PAGE_SHIFT,
-                               (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
-               WARN_ON_ONCE(ret);
-       }
-
        while (iov_iter_count(iter)) {
                ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
                                iter, dax_iomap_actor);
@@ -1023,6 +1078,15 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_rw);
 
+static int dax_fault_return(int error)
+{
+       if (error == 0)
+               return VM_FAULT_NOPAGE;
+       if (error == -ENOMEM)
+               return VM_FAULT_OOM;
+       return VM_FAULT_SIGBUS;
+}
+
 /**
  * dax_iomap_fault - handle a page fault on a DAX file
  * @vma: The virtual memory area where the fault occurred
@@ -1055,12 +1119,6 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
        if (pos >= i_size_read(inode))
                return VM_FAULT_SIGBUS;
 
-       entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
-       if (IS_ERR(entry)) {
-               error = PTR_ERR(entry);
-               goto out;
-       }
-
        if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
                flags |= IOMAP_WRITE;
 
@@ -1071,9 +1129,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
         */
        error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
        if (error)
-               goto unlock_entry;
+               return dax_fault_return(error);
        if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
-               error = -EIO;           /* fs corruption? */
+               vmf_ret = dax_fault_return(-EIO);       /* fs corruption? */
+               goto finish_iomap;
+       }
+
+       entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
+       if (IS_ERR(entry)) {
+               vmf_ret = dax_fault_return(PTR_ERR(entry));
                goto finish_iomap;
        }
 
@@ -1096,13 +1160,13 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                }
 
                if (error)
-                       goto finish_iomap;
+                       goto error_unlock_entry;
 
                __SetPageUptodate(vmf->cow_page);
                vmf_ret = finish_fault(vmf);
                if (!vmf_ret)
                        vmf_ret = VM_FAULT_DONE_COW;
-               goto finish_iomap;
+               goto unlock_entry;
        }
 
        switch (iomap.type) {
@@ -1114,12 +1178,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                }
                error = dax_insert_mapping(mapping, iomap.bdev, sector,
                                PAGE_SIZE, &entry, vma, vmf);
+               /* -EBUSY is fine, somebody else faulted on the same PTE */
+               if (error == -EBUSY)
+                       error = 0;
                break;
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (!(vmf->flags & FAULT_FLAG_WRITE)) {
-                       vmf_ret = dax_load_hole(mapping, entry, vmf);
-                       break;
+                       vmf_ret = dax_load_hole(mapping, &entry, vmf);
+                       goto unlock_entry;
                }
                /*FALLTHRU*/
        default:
@@ -1128,31 +1195,25 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                break;
        }
 
+ error_unlock_entry:
+       vmf_ret = dax_fault_return(error) | major;
+ unlock_entry:
+       put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  finish_iomap:
        if (ops->iomap_end) {
-               if (error || (vmf_ret & VM_FAULT_ERROR)) {
-                       /* keep previous error */
-                       ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
-                                       &iomap);
-               } else {
-                       error = ops->iomap_end(inode, pos, PAGE_SIZE,
-                                       PAGE_SIZE, flags, &iomap);
-               }
-       }
- unlock_entry:
-       if (vmf_ret != VM_FAULT_LOCKED || error)
-               put_locked_mapping_entry(mapping, vmf->pgoff, entry);
- out:
-       if (error == -ENOMEM)
-               return VM_FAULT_OOM | major;
-       /* -EBUSY is fine, somebody else faulted on the same PTE */
-       if (error < 0 && error != -EBUSY)
-               return VM_FAULT_SIGBUS | major;
-       if (vmf_ret) {
-               WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
-               return vmf_ret;
+               int copied = PAGE_SIZE;
+
+               if (vmf_ret & VM_FAULT_ERROR)
+                       copied = 0;
+               /*
+                * The fault is done by now and there's no way back (another
+                * thread may already be happily using the PTE we have
+                * installed). Just ignore any error from ->iomap_end since
+                * we cannot do much with it.
+                */
+               ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
        }
-       return VM_FAULT_NOPAGE | major;
+       return vmf_ret;
 }
 EXPORT_SYMBOL_GPL(dax_iomap_fault);
 
@@ -1276,16 +1337,6 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
                goto fallback;
 
-       /*
-        * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
-        * PMD or a HZP entry.  If it can't (because a 4k page is already in
-        * the tree, for instance), it will return -EEXIST and we just fall
-        * back to 4k entries.
-        */
-       entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
-       if (IS_ERR(entry))
-               goto fallback;
-
        /*
         * Note that we don't use iomap_apply here.  We aren't doing I/O, only
         * setting up a mapping, so really we're using iomap_begin() as a way
@@ -1294,10 +1345,21 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        pos = (loff_t)pgoff << PAGE_SHIFT;
        error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
        if (error)
-               goto unlock_entry;
+               goto fallback;
+
        if (iomap.offset + iomap.length < pos + PMD_SIZE)
                goto finish_iomap;
 
+       /*
+        * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
+        * PMD or a HZP entry.  If it can't (because a 4k page is already in
+        * the tree, for instance), it will return -EEXIST and we just fall
+        * back to 4k entries.
+        */
+       entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
+       if (IS_ERR(entry))
+               goto finish_iomap;
+
        vmf.pgoff = pgoff;
        vmf.flags = flags;
        vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;
@@ -1310,7 +1372,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (WARN_ON_ONCE(write))
-                       goto finish_iomap;
+                       goto unlock_entry;
                result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
                                &entry);
                break;
@@ -1319,20 +1381,23 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                break;
        }
 
+ unlock_entry:
+       put_locked_mapping_entry(mapping, pgoff, entry);
  finish_iomap:
        if (ops->iomap_end) {
-               if (result == VM_FAULT_FALLBACK) {
-                       ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags,
-                                       &iomap);
-               } else {
-                       error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE,
-                                       iomap_flags, &iomap);
-                       if (error)
-                               result = VM_FAULT_FALLBACK;
-               }
+               int copied = PMD_SIZE;
+
+               if (result == VM_FAULT_FALLBACK)
+                       copied = 0;
+                * The fault is done by now and there's no way back (another
+                * thread may already be happily using the PMD we have
+                * installed). Just ignore any error from ->iomap_end since
+                * we cannot do much with it.
+                * with it.
+                */
+               ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
+                               &iomap);
        }
- unlock_entry:
-       put_locked_mapping_entry(mapping, pgoff, entry);
  fallback:
        if (result == VM_FAULT_FALLBACK) {
                split_huge_pmd(vma, pmd, address);
index 0093ea2512a85809e16605088074a8335513e81c..f073bfca694b9982b8bc23e8f0e00be6bef075a7 100644 (file)
@@ -751,9 +751,8 @@ static int ext2_get_blocks(struct inode *inode,
                        mutex_unlock(&ei->truncate_mutex);
                        goto cleanup;
                }
-       } else {
-               *new = true;
        }
+       *new = true;
 
        ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
        mutex_unlock(&ei->truncate_mutex);
index b5f184493c57b0fd91cfc5f6c0633577ce770884..d663d3d7c81cb7fdff0f33f1903e9ed4d1f77f9a 100644 (file)
@@ -258,7 +258,6 @@ out:
 static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        int result;
-       handle_t *handle = NULL;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
@@ -266,24 +265,12 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
-               down_read(&EXT4_I(inode)->i_mmap_sem);
-               handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
-                                               EXT4_DATA_TRANS_BLOCKS(sb));
-       } else
-               down_read(&EXT4_I(inode)->i_mmap_sem);
-
-       if (IS_ERR(handle))
-               result = VM_FAULT_SIGBUS;
-       else
-               result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
-
-       if (write) {
-               if (!IS_ERR(handle))
-                       ext4_journal_stop(handle);
-               up_read(&EXT4_I(inode)->i_mmap_sem);
+       }
+       down_read(&EXT4_I(inode)->i_mmap_sem);
+       result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
+       up_read(&EXT4_I(inode)->i_mmap_sem);
+       if (write)
                sb_end_pagefault(sb);
-       } else
-               up_read(&EXT4_I(inode)->i_mmap_sem);
 
        return result;
 }
@@ -292,7 +279,6 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                                                pmd_t *pmd, unsigned int flags)
 {
        int result;
-       handle_t *handle = NULL;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = flags & FAULT_FLAG_WRITE;
@@ -300,27 +286,13 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
-               down_read(&EXT4_I(inode)->i_mmap_sem);
-               handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
-                               ext4_chunk_trans_blocks(inode,
-                                                       PMD_SIZE / PAGE_SIZE));
-       } else
-               down_read(&EXT4_I(inode)->i_mmap_sem);
-
-       if (IS_ERR(handle))
-               result = VM_FAULT_SIGBUS;
-       else {
-               result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
-                                            &ext4_iomap_ops);
        }
-
-       if (write) {
-               if (!IS_ERR(handle))
-                       ext4_journal_stop(handle);
-               up_read(&EXT4_I(inode)->i_mmap_sem);
+       down_read(&EXT4_I(inode)->i_mmap_sem);
+       result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
+                                    &ext4_iomap_ops);
+       up_read(&EXT4_I(inode)->i_mmap_sem);
+       if (write)
                sb_end_pagefault(sb);
-       } else
-               up_read(&EXT4_I(inode)->i_mmap_sem);
 
        return result;
 }
diff --git a/include/dt-bindings/mfd/tps65217.h b/include/dt-bindings/mfd/tps65217.h
deleted file mode 100644 (file)
index cafb9e6..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * This header provides macros for TI TPS65217 DT bindings.
- *
- * Copyright (C) 2016 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __DT_BINDINGS_TPS65217_H__
-#define __DT_BINDINGS_TPS65217_H__
-
-#define TPS65217_IRQ_USB       0
-#define TPS65217_IRQ_AC                1
-#define TPS65217_IRQ_PB                2
-
-#endif
index f97bcfe794724d4fb757297851fe218268d0c86b..24ad711739955e573aefea713852b2f49a8fed85 100644 (file)
@@ -41,6 +41,9 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        struct iomap_ops *ops);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
+int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
+                                     pgoff_t index);
 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
                pgoff_t index, void *entry, bool wake_all);
 
index 70231425379751021b57326eec85e70d881d3f42..a0934e6c9babf8436a18b28f4dea285894e5dca5 100644 (file)
@@ -610,7 +610,6 @@ bool bpf_helper_changes_pkt_data(void *func);
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len);
 void bpf_warn_invalid_xdp_action(u32 act);
-void bpf_warn_invalid_xdp_buffer(void);
 
 #ifdef CONFIG_BPF_JIT
 extern int bpf_jit_enable;
index c56b39890a412abfec4acc31e404781215ae3ff6..6b5818d6de322f8b5898e082ab4b3936042beb5a 100644 (file)
  */
 enum pageflags {
        PG_locked,              /* Page is locked. Don't touch. */
-       PG_waiters,             /* Page has waiters, check its waitqueue */
        PG_error,
        PG_referenced,
        PG_uptodate,
        PG_dirty,
        PG_lru,
        PG_active,
+       PG_waiters,             /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
        PG_slab,
        PG_owner_priv_1,        /* Owner use. If pagecache, fs may use*/
        PG_arch_1,
index f0cf5a1b777e4d4c37c0b927f5c9ee7bfd78fd64..0378e88f6fd3d6076f0393d67343cdb92643dbb4 100644 (file)
@@ -110,6 +110,7 @@ struct netns_ipv4 {
        int sysctl_tcp_orphan_retries;
        int sysctl_tcp_fin_timeout;
        unsigned int sysctl_tcp_notsent_lowat;
+       int sysctl_tcp_tw_reuse;
 
        int sysctl_igmp_max_memberships;
        int sysctl_igmp_max_msf;
index 207147b4c6b2a126af103e715d99c8914b290d48..6061963cca98ed84ef09f788ee0ebdc3867a07c8 100644 (file)
@@ -252,7 +252,6 @@ extern int sysctl_tcp_wmem[3];
 extern int sysctl_tcp_rmem[3];
 extern int sysctl_tcp_app_win;
 extern int sysctl_tcp_adv_win_scale;
-extern int sysctl_tcp_tw_reuse;
 extern int sysctl_tcp_frto;
 extern int sysctl_tcp_low_latency;
 extern int sysctl_tcp_nometrics_save;
index 042fd7e8e03098882b165787b163a597c4c92aa8..f75c4d031eeb2152c75182466dbf5ec97e93c008 100644 (file)
@@ -1471,6 +1471,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
                        bool multi_instance)
 {
        int cpu, ret = 0;
+       bool dynstate;
 
        if (cpuhp_cb_check(state) || !name)
                return -EINVAL;
@@ -1480,6 +1481,12 @@ int __cpuhp_setup_state(enum cpuhp_state state,
        ret = cpuhp_store_callbacks(state, name, startup, teardown,
                                    multi_instance);
 
+       dynstate = state == CPUHP_AP_ONLINE_DYN;
+       if (ret > 0 && dynstate) {
+               state = ret;
+               ret = 0;
+       }
+
        if (ret || !invoke || !startup)
                goto out;
 
@@ -1508,7 +1515,7 @@ out:
         * If the requested state is CPUHP_AP_ONLINE_DYN, return the
         * dynamically allocated state in case of success.
         */
-       if (!ret && state == CPUHP_AP_ONLINE_DYN)
+       if (!ret && dynstate)
                return state;
        return ret;
 }
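
The point of the hunks above is that the callback-registration step returns the dynamically allocated state number when CPUHP_AP_ONLINE_DYN is requested, so the positive return value has to be folded back into `state` before the startup callbacks run, not only at the end of the function. A small runnable sketch of that return-value convention (the function name and STATE_DYN are invented for the demo):

/*
 * Runnable sketch of the "negative = error, 0 = fixed slot, positive =
 * allocated slot" convention handled above; register_dyn() and STATE_DYN
 * are invented for the demo.
 */
#include <stdio.h>

#define STATE_DYN (-1)	/* caller asks for a dynamically allocated slot */

static int next_free = 100;

static int register_dyn(int state)
{
	if (state == STATE_DYN)
		return next_free++;	/* hand back the allocated slot */
	return 0;
}

int main(void)
{
	int state = STATE_DYN;
	int ret = register_dyn(state);

	/*
	 * Fold the allocated id back into 'state' *before* doing anything
	 * else with it, which is what the hunk above adds.
	 */
	if (ret > 0 && state == STATE_DYN) {
		state = ret;
		ret = 0;
	}
	if (ret)
		return 1;

	printf("callbacks would now run against state %d\n", state);
	return 0;
}
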
index 82f26cde830c4b70df30cfa47c7e21dbe6d05a7f..d0e4d1002059360e50254ae2c87dc8f7a87a2dff 100644 (file)
@@ -912,6 +912,29 @@ void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
 }
 EXPORT_SYMBOL_GPL(add_page_wait_queue);
 
+#ifndef clear_bit_unlock_is_negative_byte
+
+/*
+ * PG_waiters is the high bit in the same byte as PG_locked.
+ *
+ * On x86 (and on many other architectures), we can clear PG_locked and
+ * test the sign bit at the same time. But if the architecture does
+ * not support that special operation, we just do this all by hand
+ * instead.
+ *
+ * The read of PG_waiters has to be after (or concurrently with) PG_locked
+ * being cleared, but a memory barrier should be unnecessary since it is
+ * in the same byte as PG_locked.
+ */
+static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
+{
+       clear_bit_unlock(nr, mem);
+       /* smp_mb__after_atomic(); */
+       return test_bit(PG_waiters, mem);
+}
+
+#endif
+
 /**
  * unlock_page - unlock a locked page
  * @page: the page
@@ -921,16 +944,19 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
  * mechanism between PageLocked pages and PageWriteback pages is shared.
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
- * The mb is necessary to enforce ordering between the clear_bit and the read
- * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
+ * Note that this depends on PG_waiters being the sign bit in the byte
+ * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
+ * clear the PG_locked bit and test PG_waiters at the same time fairly
+ * portably (architectures that do LL/SC can test any bit, while x86 can
+ * test the sign bit).
  */
 void unlock_page(struct page *page)
 {
+       BUILD_BUG_ON(PG_waiters != 7);
        page = compound_head(page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
-       clear_bit_unlock(PG_locked, &page->flags);
-       smp_mb__after_atomic();
-       wake_up_page(page, PG_locked);
+       if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
+               wake_up_page_bit(page, PG_locked);
 }
 EXPORT_SYMBOL(unlock_page);
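
The BUILD_BUG_ON(PG_waiters != 7) added above is what makes the trick work: PG_locked is bit 0 and PG_waiters is bit 7 of the same byte, so the lock bit can be cleared and the byte's sign tested in one operation. A plain user-space demonstration of the bit layout (non-atomic, purely to show the arithmetic):

/*
 * User-space demonstration of the PG_locked/PG_waiters byte layout the
 * hunk above relies on. Plain (non-atomic) arithmetic only; the kernel
 * performs the clear-and-test as a single atomic byte operation.
 */
#include <stdio.h>

#define PG_LOCKED_BIT	0	/* low bit of the flags byte */
#define PG_WAITERS_BIT	7	/* sign bit of the same byte */

int main(void)
{
	unsigned char flags = (1u << PG_LOCKED_BIT) | (1u << PG_WAITERS_BIT);

	/* "unlock": clear the lock bit ... */
	flags &= (unsigned char)~(1u << PG_LOCKED_BIT);

	/* ... and the sign of the resulting byte says whether anyone is
	 * waiting, with no second load of the flags word */
	if ((signed char)flags < 0)
		printf("waiters present -> wake them up\n");
	else
		printf("no waiters\n");
	return 0;
}
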
 
index fd97f1dbce290f39e1d0d0367006df954f20e8f1..dd7b24e083c5b1f76851eb0b5e3359dde92d910e 100644 (file)
 #include <linux/rmap.h>
 #include "internal.h"
 
-static void clear_exceptional_entry(struct address_space *mapping,
-                                   pgoff_t index, void *entry)
+static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
+                              void *entry)
 {
        struct radix_tree_node *node;
        void **slot;
 
-       /* Handled by shmem itself */
-       if (shmem_mapping(mapping))
-               return;
-
-       if (dax_mapping(mapping)) {
-               dax_delete_mapping_entry(mapping, index);
-               return;
-       }
        spin_lock_irq(&mapping->tree_lock);
        /*
         * Regular page slots are stabilized by the page lock even
@@ -55,6 +47,56 @@ unlock:
        spin_unlock_irq(&mapping->tree_lock);
 }
 
+/*
+ * Unconditionally remove exceptional entry. Usually called from truncate path.
+ */
+static void truncate_exceptional_entry(struct address_space *mapping,
+                                      pgoff_t index, void *entry)
+{
+       /* Handled by shmem itself */
+       if (shmem_mapping(mapping))
+               return;
+
+       if (dax_mapping(mapping)) {
+               dax_delete_mapping_entry(mapping, index);
+               return;
+       }
+       clear_shadow_entry(mapping, index, entry);
+}
+
+/*
+ * Invalidate exceptional entry if easily possible. This handles exceptional
+ * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and
+ * clean entries.
+ */
+static int invalidate_exceptional_entry(struct address_space *mapping,
+                                       pgoff_t index, void *entry)
+{
+       /* Handled by shmem itself */
+       if (shmem_mapping(mapping))
+               return 1;
+       if (dax_mapping(mapping))
+               return dax_invalidate_mapping_entry(mapping, index);
+       clear_shadow_entry(mapping, index, entry);
+       return 1;
+}
+
+/*
+ * Invalidate exceptional entry if clean. This handles exceptional entries for
+ * invalidate_inode_pages2() so for DAX it evicts only clean entries.
+ */
+static int invalidate_exceptional_entry2(struct address_space *mapping,
+                                        pgoff_t index, void *entry)
+{
+       /* Handled by shmem itself */
+       if (shmem_mapping(mapping))
+               return 1;
+       if (dax_mapping(mapping))
+               return dax_invalidate_mapping_entry_sync(mapping, index);
+       clear_shadow_entry(mapping, index, entry);
+       return 1;
+}
+
 /**
  * do_invalidatepage - invalidate part or all of a page
  * @page: the page which is affected
@@ -262,7 +304,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
                                break;
 
                        if (radix_tree_exceptional_entry(page)) {
-                               clear_exceptional_entry(mapping, index, page);
+                               truncate_exceptional_entry(mapping, index,
+                                                          page);
                                continue;
                        }
 
@@ -351,7 +394,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        }
 
                        if (radix_tree_exceptional_entry(page)) {
-                               clear_exceptional_entry(mapping, index, page);
+                               truncate_exceptional_entry(mapping, index,
+                                                          page);
                                continue;
                        }
 
@@ -470,7 +514,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                break;
 
                        if (radix_tree_exceptional_entry(page)) {
-                               clear_exceptional_entry(mapping, index, page);
+                               invalidate_exceptional_entry(mapping, index,
+                                                            page);
                                continue;
                        }
 
@@ -592,7 +637,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                break;
 
                        if (radix_tree_exceptional_entry(page)) {
-                               clear_exceptional_entry(mapping, index, page);
+                               if (!invalidate_exceptional_entry2(mapping,
+                                                                  index, page))
+                                       ret = -EBUSY;
                                continue;
                        }
 
index e6c412b94decba010662b90240959320c4e77921..1969b3f118c1d6e30b3edccd7c85a6caa520af5c 100644 (file)
@@ -2972,12 +2972,6 @@ void bpf_warn_invalid_xdp_action(u32 act)
 }
 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
 
-void bpf_warn_invalid_xdp_buffer(void)
-{
-       WARN_ONCE(1, "Illegal XDP buffer encountered, expect throughput degradation\n");
-}
-EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_buffer);
-
 static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                                        int src_reg, int ctx_off,
                                        struct bpf_insn *insn_buf,
index 80bc36b25de21d5e6b1c3e6f6001258b38656d41..22cbd61079b5a9d2661583b7d96eea46eddb685d 100644 (file)
@@ -432,13 +432,6 @@ static struct ctl_table ipv4_table[] = {
                .extra1         = &tcp_adv_win_scale_min,
                .extra2         = &tcp_adv_win_scale_max,
        },
-       {
-               .procname       = "tcp_tw_reuse",
-               .data           = &sysctl_tcp_tw_reuse,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "tcp_frto",
                .data           = &sysctl_tcp_frto,
@@ -960,6 +953,13 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "tcp_tw_reuse",
+               .data           = &init_net.ipv4.sysctl_tcp_tw_reuse,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        {
                .procname       = "fib_multipath_use_neigh",
index 30d81f533ada5708a61155b589e3835d9af4128d..fe9da4fb96bf967ab27f5179fc5a9ac3576cc693 100644 (file)
@@ -84,7 +84,6 @@
 #include <crypto/hash.h>
 #include <linux/scatterlist.h>
 
-int sysctl_tcp_tw_reuse __read_mostly;
 int sysctl_tcp_low_latency __read_mostly;
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -120,7 +119,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
           and use initial timestamp retrieved from peer table.
         */
        if (tcptw->tw_ts_recent_stamp &&
-           (!twp || (sysctl_tcp_tw_reuse &&
+           (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
                             get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
                tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                if (tp->write_seq == 0)
@@ -2456,6 +2455,7 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.sysctl_tcp_orphan_retries = 0;
        net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
        net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
+       net->ipv4.sysctl_tcp_tw_reuse = 0;
 
        return 0;
 fail:
index 2d4c4d3911c02ce91de79ac4c9ad4923d4bc2329..9c62b6325f7adc92420b605cf4cdc3b26da437fc 100644 (file)
@@ -606,7 +606,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
        rcu_assign_pointer(flow->sf_acts, acts);
        packet->priority = flow->key.phy.priority;
        packet->mark = flow->key.phy.skb_mark;
-       packet->protocol = flow->key.eth.type;
 
        rcu_read_lock();
        dp = get_dp_rcu(net, ovs_header->dp_ifindex);
index 08aa926cd5cfe5f6b3f47f3dc4df5c970a6ba2a7..2c0a00f7f1b7d195b98f70e6ec0235b650a63311 100644 (file)
@@ -312,7 +312,8 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
  * Returns 0 if it encounters a non-vlan or incomplete packet.
  * Returns 1 after successfully parsing vlan tag.
  */
-static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh)
+static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
+                         bool untag_vlan)
 {
        struct vlan_head *vh = (struct vlan_head *)skb->data;
 
@@ -330,7 +331,20 @@ static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh)
        key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT);
        key_vh->tpid = vh->tpid;
 
-       __skb_pull(skb, sizeof(struct vlan_head));
+       if (unlikely(untag_vlan)) {
+               int offset = skb->data - skb_mac_header(skb);
+               u16 tci;
+               int err;
+
+               __skb_push(skb, offset);
+               err = __skb_vlan_pop(skb, &tci);
+               __skb_pull(skb, offset);
+               if (err)
+                       return err;
+               __vlan_hwaccel_put_tag(skb, key_vh->tpid, tci);
+       } else {
+               __skb_pull(skb, sizeof(struct vlan_head));
+       }
        return 1;
 }
 
@@ -351,13 +365,13 @@ static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
                key->eth.vlan.tpid = skb->vlan_proto;
        } else {
                /* Parse outer vlan tag in the non-accelerated case. */
-               res = parse_vlan_tag(skb, &key->eth.vlan);
+               res = parse_vlan_tag(skb, &key->eth.vlan, true);
                if (res <= 0)
                        return res;
        }
 
        /* Parse inner vlan tag. */
-       res = parse_vlan_tag(skb, &key->eth.cvlan);
+       res = parse_vlan_tag(skb, &key->eth.cvlan, false);
        if (res <= 0)
                return res;
 
@@ -800,29 +814,15 @@ int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
        if (err)
                return err;
 
-       if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
-               /* key_extract assumes that skb->protocol is set-up for
-                * layer 3 packets which is the case for other callers,
-                * in particular packets recieved from the network stack.
-                * Here the correct value can be set from the metadata
-                * extracted above.
-                */
-               skb->protocol = key->eth.type;
-       } else {
-               struct ethhdr *eth;
-
-               skb_reset_mac_header(skb);
-               eth = eth_hdr(skb);
-
-               /* Normally, setting the skb 'protocol' field would be
-                * handled by a call to eth_type_trans(), but it assumes
-                * there's a sending device, which we may not have.
-                */
-               if (eth_proto_is_802_3(eth->h_proto))
-                       skb->protocol = eth->h_proto;
-               else
-                       skb->protocol = htons(ETH_P_802_2);
-       }
+       /* key_extract assumes that skb->protocol is set up for
+        * layer 3 packets, which is the case for other callers,
+        * in particular packets received from the network stack.
+        * Here the correct value can be set from the metadata
+        * extracted above.
+        * For an L2 packet the key's eth type is zero; skb->protocol
+        * is set to the correct value later, during key_extract.
+        */
 
+       skb->protocol = key->eth.type;
        return key_extract(skb, key);
 }
index 3fbba79a4ef0521604df48275fb7dae8a8b7099e..1ecdf809b5fa8913d56bb4194ac193274d4d625a 100644 (file)
@@ -148,13 +148,15 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
        unsigned long cl;
        unsigned long fh;
        int err;
-       int tp_created = 0;
+       int tp_created;
 
        if ((n->nlmsg_type != RTM_GETTFILTER) &&
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
 replay:
+       tp_created = 0;
+
        err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
        if (err < 0)
                return err;
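
The cls_api hunk fixes a classic goto-retry bug: tp_created was initialised once, before the replay label, so a later pass could observe a stale value from the previous attempt. A minimal runnable reproduction of the pattern (generic names, not the classifier code):

/*
 * Minimal reproduction of the retry-label pattern fixed above: per-attempt
 * state must be re-initialised after the label. Names are generic.
 */
#include <stdbool.h>
#include <stdio.h>

static int attempts;

static int try_lookup(bool *created)
{
	/* pretend the first pass creates something and then must be retried */
	if (attempts++ == 0) {
		*created = true;
		return -1;
	}
	return 0;
}

int main(void)
{
	bool created;
	int err;

replay:
	created = false;	/* the fix: reset on every pass through the label */
	err = try_lookup(&created);
	if (err)
		goto replay;

	/*
	 * Without the reset above, 'created' would still be true here and an
	 * error path could tear down an object that this pass never created.
	 */
	printf("done, created=%d\n", created);
	return 0;
}
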
index 333c5dae0072aa3f7a517ec3f345a4a1f60ca5ad..800caaa699a1669f6efe228e51974e6e2bd19e60 100644 (file)
@@ -441,15 +441,19 @@ static void __tipc_shutdown(struct socket *sock, int error)
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                if (TIPC_SKB_CB(skb)->bytes_read) {
                        kfree_skb(skb);
-               } else {
-                       if (!tipc_sk_type_connectionless(sk) &&
-                           sk->sk_state != TIPC_DISCONNECTING) {
-                               tipc_set_sk_state(sk, TIPC_DISCONNECTING);
-                               tipc_node_remove_conn(net, dnode, tsk->portid);
-                       }
-                       tipc_sk_respond(sk, skb, error);
+                       continue;
+               }
+               if (!tipc_sk_type_connectionless(sk) &&
+                   sk->sk_state != TIPC_DISCONNECTING) {
+                       tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+                       tipc_node_remove_conn(net, dnode, tsk->portid);
                }
+               tipc_sk_respond(sk, skb, error);
        }
+
+       if (tipc_sk_type_connectionless(sk))
+               return;
+
        if (sk->sk_state != TIPC_DISCONNECTING) {
                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
                                      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
@@ -457,10 +461,8 @@ static void __tipc_shutdown(struct socket *sock, int error)
                                      tsk->portid, error);
                if (skb)
                        tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
-               if (!tipc_sk_type_connectionless(sk)) {
-                       tipc_node_remove_conn(net, dnode, tsk->portid);
-                       tipc_set_sk_state(sk, TIPC_DISCONNECTING);
-               }
+               tipc_node_remove_conn(net, dnode, tsk->portid);
+               tipc_set_sk_state(sk, TIPC_DISCONNECTING);
        }
 }